/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * STREAMS Buffering module
 *
 * This streams module collects incoming messages from modules below
 * it on the stream and buffers them up into a smaller number of
 * aggregated messages.  Its main purpose is to reduce overhead by
 * cutting down on the number of read (or getmsg) calls its client
 * user process makes.
 *  - only M_DATA is buffered.
 *  - multithreading assumes the module is configured as D_MTQPAIR.
 *  - packets are lost only if flag SB_NO_HEADER is clear and buffer
 *    allocation fails.
 *  - in-order message transmission.  This is enforced for messages other
 *    than high priority messages.
 *  - zero-length messages on the read side are not passed up the
 *    stream but are used internally for synchronization.
 * FLAGS:
 *  - SB_NO_PROTO_CVT - no conversion of M_PROTO messages to M_DATA.
 *    (conversion is the default for backwards compatibility
 *    hence the negative logic).
 *  - SB_NO_HEADER - no headers in buffered data.
 *    (adding headers is the default for backwards compatibility
 *    hence the negative logic).
 *  - SB_DEFER_CHUNK - provides improved response time in question-answer
 *    applications.  Buffering is not enabled until the second message
 *    is received on the read side within the sb_ticks interval.
 *    This option will often be used in combination with flag SB_SEND_ON_WRITE.
 *  - SB_SEND_ON_WRITE - a write message results in any pending buffered read
 *    data being immediately sent upstream.
 *  - SB_NO_DROPS - bufmod behaves transparently in flow control and propagates
 *    the blocked flow condition downstream.  If this flag is clear (default)
 *    messages will be dropped if the upstream flow is blocked.
 */
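/*
 * Illustrative usage sketch (not part of this module): a user-level
 * client would typically push bufmod onto its stream and then tune it
 * with the ioctls defined in <sys/bufmod.h>.  The descriptor "fd" and
 * the parameter values below are hypothetical examples only.
 *
 *	uint_t chunk = 16384;			(chunk size in bytes)
 *	struct timeval tv = { 0, 50000 };	(flush interval)
 *	uint_t flags = SB_NO_DROPS;
 *
 *	(void) ioctl(fd, I_PUSH, "bufmod");
 *	(void) ioctl(fd, SBIOCSCHUNK, &chunk);
 *	(void) ioctl(fd, SBIOCSTIME, &tv);
 *	(void) ioctl(fd, SBIOCSFLAGS, &flags);
 *
 * Each read() or getmsg() then returns a chunk of buffered packets,
 * each preceded by a struct sb_hdr unless SB_NO_HEADER is set.
 */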

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/time.h>
#include <sys/stream.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/strsun.h>
#include <sys/bufmod.h>
#include <sys/modctl.h>
#include <sys/isa_defs.h>

/*
 * Per-Stream state information.
 *
 * If sb_ticks is negative, we don't deliver chunks until they're
 * full.  If it's zero, we deliver every packet as it arrives.  (In
 * this case we force sb_chunk to zero, to make the implementation
 * easier.)  Otherwise, sb_ticks gives the number of ticks in a
 * buffering interval.  The interval begins when a read-side data
 * message is received and a timeout is not active.  If sb_snap is
 * zero, no truncation of the msg is done.
 */
struct sb {
	queue_t	*sb_rq;		/* our rq */
	mblk_t	*sb_mp;		/* partial chunk */
	mblk_t	*sb_head;	/* pre-allocated space for the next header */
	mblk_t	*sb_tail;	/* first mblk of last message appended */
	uint_t	sb_mlen;	/* sb_mp length */
	uint_t	sb_mcount;	/* input msg count in sb_mp */
	uint_t	sb_chunk;	/* max chunk size */
	clock_t	sb_ticks;	/* timeout interval */
	timeout_id_t sb_timeoutid; /* qtimeout() id */
	uint_t	sb_drops;	/* cumulative # discarded msgs */
	uint_t	sb_snap;	/* snapshot length */
	uint_t	sb_flags;	/* flags field */
	uint_t	sb_state;	/* state variable */
};

/*
 * Function prototypes.
 */
static int	sbopen(queue_t *, dev_t *, int, int, cred_t *);
static int	sbclose(queue_t *, int, cred_t *);
static void	sbwput(queue_t *, mblk_t *);
static void	sbrput(queue_t *, mblk_t *);
static void	sbrsrv(queue_t *);
static void	sbioctl(queue_t *, mblk_t *);
static void	sbaddmsg(queue_t *, mblk_t *);
static void	sbtick(void *);
static void	sbclosechunk(struct sb *);
static void	sbsendit(queue_t *, mblk_t *);

static struct module_info	sb_minfo = {
	21,		/* mi_idnum */
	"bufmod",	/* mi_idname */
	0,		/* mi_minpsz */
	INFPSZ,		/* mi_maxpsz */
	1,		/* mi_hiwat */
	0		/* mi_lowat */
};

static struct qinit	sb_rinit = {
	(int (*)())sbrput,	/* qi_putp */
	(int (*)())sbrsrv,	/* qi_srvp */
	sbopen,			/* qi_qopen */
	sbclose,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&sb_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static struct qinit	sb_winit = {
	(int (*)())sbwput,	/* qi_putp */
	NULL,			/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&sb_minfo,		/* qi_minfo */
	NULL			/* qi_mstat */
};

static struct streamtab	sb_info = {
	&sb_rinit,	/* st_rdinit */
	&sb_winit,	/* st_wrinit */
	NULL,		/* st_muxrinit */
	NULL		/* st_muxwinit */
};


/*
 * This is the loadable module wrapper.
 */

static struct fmodsw fsw = {
	"bufmod",
	&sb_info,
	D_MTQPAIR | D_MP
};

/*
 * Module linkage information for the kernel.
 */

static struct modlstrmod modlstrmod = {
	&mod_strmodops, "streams buffer mod", &fsw
};

static struct modlinkage modlinkage = {
	MODREV_1, &modlstrmod, NULL
};


int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/* ARGSUSED */
static int
sbopen(queue_t *rq, dev_t *dev, int oflag, int sflag, cred_t *crp)
{
	struct sb	*sbp;
	ASSERT(rq);

	if (sflag != MODOPEN)
		return (EINVAL);

	if (rq->q_ptr)
		return (0);

	/*
	 * Allocate and initialize per-Stream structure.
	 */
	sbp = kmem_alloc(sizeof (struct sb), KM_SLEEP);
	sbp->sb_rq = rq;
	sbp->sb_ticks = -1;
	sbp->sb_chunk = SB_DFLT_CHUNK;
	sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
	sbp->sb_mlen = 0;
	sbp->sb_mcount = 0;
	sbp->sb_timeoutid = 0;
	sbp->sb_drops = 0;
	sbp->sb_snap = 0;
	sbp->sb_flags = 0;
	sbp->sb_state = 0;

	rq->q_ptr = WR(rq)->q_ptr = sbp;

	qprocson(rq);

	return (0);
}

/* ARGSUSED1 */
static int
sbclose(queue_t *rq, int flag, cred_t *credp)
{
	struct sb	*sbp = (struct sb *)rq->q_ptr;

	ASSERT(sbp);

	qprocsoff(rq);
	/*
	 * Cancel an outstanding timeout
	 */
	if (sbp->sb_timeoutid != 0) {
		(void) quntimeout(rq, sbp->sb_timeoutid);
		sbp->sb_timeoutid = 0;
	}
	/*
	 * Free the current chunk.
	 */
	if (sbp->sb_mp) {
		freemsg(sbp->sb_mp);
		sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
		sbp->sb_mlen = 0;
	}

	/*
	 * Free the per-Stream structure.
	 */
	kmem_free((caddr_t)sbp, sizeof (struct sb));
	rq->q_ptr = WR(rq)->q_ptr = NULL;

	return (0);
}

/*
 * The correction factor is introduced to compensate for
 * whatever assumptions the modules below have made about
 * how much traffic is flowing through the stream and the fact
 * that bufmod may be snipping messages with the sb_snap length.
 */
#define	SNIT_HIWAT(msgsize, fudge)	((4 * msgsize * fudge) + 512)
#define	SNIT_LOWAT(msgsize, fudge)	((2 * msgsize * fudge) + 256)
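/*
 * For example, with an 8192-byte chunk and a fudge factor of 1 these
 * macros set the stream head to hiwat = 4 * 8192 + 512 = 33280 bytes
 * and lowat = 2 * 8192 + 256 = 16640 bytes.
 */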
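/*
 * sbioc() handles the M_IOCDATA messages that complete the second,
 * copyin/copyout phase of the transparent ioctls set up by sbioctl()
 * below: it applies the copied-in value for the "set" commands and
 * acknowledges completion of the copyout for the "get" commands.
 */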
static void
sbioc(queue_t *wq, mblk_t *mp)
{
	struct iocblk	*iocp;
	struct sb	*sbp = (struct sb *)wq->q_ptr;
	clock_t	ticks;
	mblk_t	*mop;

	iocp = (struct iocblk *)mp->b_rptr;

	switch (iocp->ioc_cmd) {
	case SBIOCGCHUNK:
	case SBIOCGSNAP:
	case SBIOCGFLAGS:
	case SBIOCGTIME:
		miocack(wq, mp, 0, 0);
		return;

	case SBIOCSTIME:
#ifdef _SYSCALL32_IMPL
		if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
			struct timeval32 *t32;

			t32 = (struct timeval32 *)mp->b_cont->b_rptr;
			if (t32->tv_sec < 0 || t32->tv_usec < 0) {
				miocnak(wq, mp, 0, EINVAL);
				break;
			}
			ticks = TIMEVAL_TO_TICK(t32);
		} else
#endif /* _SYSCALL32_IMPL */
		{
			struct timeval *tb;

			tb = (struct timeval *)mp->b_cont->b_rptr;

			if (tb->tv_sec < 0 || tb->tv_usec < 0) {
				miocnak(wq, mp, 0, EINVAL);
				break;
			}
			ticks = TIMEVAL_TO_TICK(tb);
		}
		sbp->sb_ticks = ticks;
		if (ticks == 0)
			sbp->sb_chunk = 0;
		miocack(wq, mp, 0, 0);
		sbclosechunk(sbp);
		return;

	case SBIOCSCHUNK:
		/*
		 * Set up hi/lo water marks on stream head read queue.
		 * Unlikely to run out of resources.  Fix at later date.
		 */
		if ((mop = allocb(sizeof (struct stroptions),
		    BPRI_MED)) != NULL) {
			struct stroptions *sop;
			uint_t chunk;

			chunk = *(uint_t *)mp->b_cont->b_rptr;
			mop->b_datap->db_type = M_SETOPTS;
			mop->b_wptr += sizeof (struct stroptions);
			sop = (struct stroptions *)mop->b_rptr;
			sop->so_flags = SO_HIWAT | SO_LOWAT;
			sop->so_hiwat = SNIT_HIWAT(chunk, 1);
			sop->so_lowat = SNIT_LOWAT(chunk, 1);
			qreply(wq, mop);
		}

		sbp->sb_chunk = *(uint_t *)mp->b_cont->b_rptr;
		miocack(wq, mp, 0, 0);
		sbclosechunk(sbp);
		return;

	case SBIOCSFLAGS:
		sbp->sb_flags = *(uint_t *)mp->b_cont->b_rptr;
		miocack(wq, mp, 0, 0);
		return;

	case SBIOCSSNAP:
		/*
		 * If chunking, don't worry about the effects of
		 * snipping the message size on stream head flow
		 * control, since it has a relatively small bearing
		 * on the data rate onto the stream head.
		 */
		if (!sbp->sb_chunk) {
			/*
			 * Set up hi/lo water marks on stream head read queue.
			 * Unlikely to run out of resources.  Fix at later date.
			 */
			if ((mop = allocb(sizeof (struct stroptions),
			    BPRI_MED)) != NULL) {
				struct stroptions *sop;
				uint_t snap;
				int fudge;

				snap = *(uint_t *)mp->b_cont->b_rptr;
				mop->b_datap->db_type = M_SETOPTS;
				mop->b_wptr += sizeof (struct stroptions);
				sop = (struct stroptions *)mop->b_rptr;
				sop->so_flags = SO_HIWAT | SO_LOWAT;
				fudge = snap <= 100 ? 4 :
				    snap <= 400 ? 2 :
				    1;
				sop->so_hiwat = SNIT_HIWAT(snap, fudge);
				sop->so_lowat = SNIT_LOWAT(snap, fudge);
				qreply(wq, mop);
			}
		}

		sbp->sb_snap = *(uint_t *)mp->b_cont->b_rptr;
		miocack(wq, mp, 0, 0);
		return;

	default:
		ASSERT(0);
		return;
	}
}

/*
 * Write-side put procedure.  Its main task is to detect ioctls
 * for manipulating the buffering state and hand them to sbioctl.
 * Other message types are passed on through.
 */
static void
sbwput(queue_t *wq, mblk_t *mp)
{
	struct sb	*sbp = (struct sb *)wq->q_ptr;
	struct copyresp	*resp;

	if (sbp->sb_flags & SB_SEND_ON_WRITE)
		sbclosechunk(sbp);
	switch (mp->b_datap->db_type) {
	case M_IOCTL:
		sbioctl(wq, mp);
		break;

	case M_IOCDATA:
		resp = (struct copyresp *)mp->b_rptr;
		if (resp->cp_rval) {
			/*
			 * Just free message on failure.
			 */
			freemsg(mp);
			break;
		}

		switch (resp->cp_cmd) {
		case SBIOCSTIME:
		case SBIOCSCHUNK:
		case SBIOCSFLAGS:
		case SBIOCSSNAP:
		case SBIOCGTIME:
		case SBIOCGCHUNK:
		case SBIOCGSNAP:
		case SBIOCGFLAGS:
			sbioc(wq, mp);
			break;

		default:
			putnext(wq, mp);
			break;
		}
		break;

	default:
		putnext(wq, mp);
		break;
	}
}

/*
 * Read-side put procedure.  It's responsible for buffering up incoming
 * messages and grouping them into aggregates according to the current
 * buffering parameters.
 */
static void
sbrput(queue_t *rq, mblk_t *mp)
{
	struct sb	*sbp = (struct sb *)rq->q_ptr;

	ASSERT(sbp);

	switch (mp->b_datap->db_type) {
	case M_PROTO:
		if (sbp->sb_flags & SB_NO_PROTO_CVT) {
			sbclosechunk(sbp);
			sbsendit(rq, mp);
			break;
		} else {
			/*
			 * Convert M_PROTO to M_DATA.
			 */
			mp->b_datap->db_type = M_DATA;
		}
		/* FALLTHRU */

	case M_DATA:
		if ((sbp->sb_flags & SB_DEFER_CHUNK) &&
		    !(sbp->sb_state & SB_FRCVD)) {
			sbclosechunk(sbp);
			sbsendit(rq, mp);
			sbp->sb_state |= SB_FRCVD;
		} else
			sbaddmsg(rq, mp);

		if ((sbp->sb_ticks > 0) && !(sbp->sb_timeoutid))
			sbp->sb_timeoutid = qtimeout(sbp->sb_rq, sbtick,
			    sbp, sbp->sb_ticks);

		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHR) {
			/*
			 * Reset timeout, flush the chunk currently in
			 * progress, and start a new chunk.
			 */
			if (sbp->sb_timeoutid) {
				(void) quntimeout(sbp->sb_rq,
				    sbp->sb_timeoutid);
				sbp->sb_timeoutid = 0;
			}
			if (sbp->sb_mp) {
				freemsg(sbp->sb_mp);
				sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
				sbp->sb_mlen = 0;
				sbp->sb_mcount = 0;
			}
			flushq(rq, FLUSHALL);
		}
		putnext(rq, mp);
		break;

	case M_CTL:
		/*
		 * Zero-length M_CTL means our timeout() popped.
		 */
		if (MBLKL(mp) == 0) {
			freemsg(mp);
			sbclosechunk(sbp);
		} else {
			sbclosechunk(sbp);
			sbsendit(rq, mp);
		}
		break;

	default:
		if (mp->b_datap->db_type <= QPCTL) {
			sbclosechunk(sbp);
			sbsendit(rq, mp);
		} else {
			/* Note: out of band */
			putnext(rq, mp);
		}
		break;
	}
}

/*
 * Read service procedure.
 */
/* ARGSUSED */
static void
sbrsrv(queue_t *rq)
{
	mblk_t	*mp;

	/*
	 * High priority messages shouldn't get here but if
	 * one does, jam it through to avoid infinite loop.
	 */
	while ((mp = getq(rq)) != NULL) {
		if (!canputnext(rq) && (mp->b_datap->db_type <= QPCTL)) {
			/* should only get here if SB_NO_DROPS is set */
			(void) putbq(rq, mp);
			return;
		}
		putnext(rq, mp);
	}
}

/*
 * Handle write-side M_IOCTL messages.
 */
static void
sbioctl(queue_t *wq, mblk_t *mp)
{
	struct sb	*sbp = (struct sb *)wq->q_ptr;
	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
	struct timeval	*t;
	clock_t	ticks;
	mblk_t	*mop;
	int	transparent = iocp->ioc_count;
	mblk_t	*datamp;
	int	error;

	switch (iocp->ioc_cmd) {
	case SBIOCSTIME:
		if (iocp->ioc_count == TRANSPARENT) {
#ifdef _SYSCALL32_IMPL
			if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
				mcopyin(mp, NULL, sizeof (struct timeval32),
				    NULL);
			} else
#endif /* _SYSCALL32_IMPL */
			{
				mcopyin(mp, NULL, sizeof (*t), NULL);
			}
			qreply(wq, mp);
		} else {
			/*
			 * Verify argument length.
			 */
#ifdef _SYSCALL32_IMPL
			if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
				struct timeval32 *t32;

				error = miocpullup(mp,
				    sizeof (struct timeval32));
				if (error != 0) {
					miocnak(wq, mp, 0, error);
					break;
				}
				t32 = (struct timeval32 *)mp->b_cont->b_rptr;
				if (t32->tv_sec < 0 || t32->tv_usec < 0) {
					miocnak(wq, mp, 0, EINVAL);
					break;
				}
				ticks = TIMEVAL_TO_TICK(t32);
			} else
#endif /* _SYSCALL32_IMPL */
			{
				error = miocpullup(mp, sizeof (struct timeval));
				if (error != 0) {
					miocnak(wq, mp, 0, error);
					break;
				}

				t = (struct timeval *)mp->b_cont->b_rptr;
				if (t->tv_sec < 0 || t->tv_usec < 0) {
					miocnak(wq, mp, 0, EINVAL);
					break;
				}
				ticks = TIMEVAL_TO_TICK(t);
			}
			sbp->sb_ticks = ticks;
			if (ticks == 0)
				sbp->sb_chunk = 0;
			miocack(wq, mp, 0, 0);
			sbclosechunk(sbp);
		}
		break;

	case SBIOCGTIME: {
		struct timeval *t;

		/*
		 * Verify argument length.
		 */
		if (transparent != TRANSPARENT) {
#ifdef _SYSCALL32_IMPL
			if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
				error = miocpullup(mp,
				    sizeof (struct timeval32));
				if (error != 0) {
					miocnak(wq, mp, 0, error);
					break;
				}
			} else
#endif /* _SYSCALL32_IMPL */
			error = miocpullup(mp, sizeof (struct timeval));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}
		}

		/*
		 * If infinite timeout, return range error
		 * for the ioctl.
		 */
		if (sbp->sb_ticks < 0) {
			miocnak(wq, mp, 0, ERANGE);
			break;
		}

#ifdef _SYSCALL32_IMPL
		if ((iocp->ioc_flag & IOC_MODELS) != IOC_NATIVE) {
			struct timeval32 *t32;

			if (transparent == TRANSPARENT) {
				datamp = allocb(sizeof (*t32), BPRI_MED);
				if (datamp == NULL) {
					miocnak(wq, mp, 0, EAGAIN);
					break;
				}
				mcopyout(mp, NULL, sizeof (*t32), NULL, datamp);
			}

			t32 = (struct timeval32 *)mp->b_cont->b_rptr;
			TICK_TO_TIMEVAL32(sbp->sb_ticks, t32);

			if (transparent == TRANSPARENT)
				qreply(wq, mp);
			else
				miocack(wq, mp, sizeof (*t32), 0);
		} else
#endif /* _SYSCALL32_IMPL */
		{
			if (transparent == TRANSPARENT) {
				datamp = allocb(sizeof (*t), BPRI_MED);
				if (datamp == NULL) {
					miocnak(wq, mp, 0, EAGAIN);
					break;
				}
				mcopyout(mp, NULL, sizeof (*t), NULL, datamp);
			}

			t = (struct timeval *)mp->b_cont->b_rptr;
			TICK_TO_TIMEVAL(sbp->sb_ticks, t);

			if (transparent == TRANSPARENT)
				qreply(wq, mp);
			else
				miocack(wq, mp, sizeof (*t), 0);
		}
		break;
	}

	case SBIOCCTIME:
		sbp->sb_ticks = -1;
		miocack(wq, mp, 0, 0);
		break;

	case SBIOCSCHUNK:
		if (iocp->ioc_count == TRANSPARENT) {
			mcopyin(mp, NULL, sizeof (uint_t), NULL);
			qreply(wq, mp);
		} else {
			/*
			 * Verify argument length.
			 */
			error = miocpullup(mp, sizeof (uint_t));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}

			/*
			 * Set up hi/lo water marks on stream head read queue.
			 * Unlikely to run out of resources.  Fix at later date.
			 */
			if ((mop = allocb(sizeof (struct stroptions),
			    BPRI_MED)) != NULL) {
				struct stroptions *sop;
				uint_t chunk;

				chunk = *(uint_t *)mp->b_cont->b_rptr;
				mop->b_datap->db_type = M_SETOPTS;
				mop->b_wptr += sizeof (struct stroptions);
				sop = (struct stroptions *)mop->b_rptr;
				sop->so_flags = SO_HIWAT | SO_LOWAT;
				sop->so_hiwat = SNIT_HIWAT(chunk, 1);
				sop->so_lowat = SNIT_LOWAT(chunk, 1);
				qreply(wq, mop);
			}

			sbp->sb_chunk = *(uint_t *)mp->b_cont->b_rptr;
			miocack(wq, mp, 0, 0);
			sbclosechunk(sbp);
		}
		break;

	case SBIOCGCHUNK:
		/*
		 * Verify argument length.
		 */
		if (transparent != TRANSPARENT) {
			error = miocpullup(mp, sizeof (uint_t));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}
		}

		if (transparent == TRANSPARENT) {
			datamp = allocb(sizeof (uint_t), BPRI_MED);
			if (datamp == NULL) {
				miocnak(wq, mp, 0, EAGAIN);
				break;
			}
			mcopyout(mp, NULL, sizeof (uint_t), NULL, datamp);
		}

		*(uint_t *)mp->b_cont->b_rptr = sbp->sb_chunk;

		if (transparent == TRANSPARENT)
			qreply(wq, mp);
		else
			miocack(wq, mp, sizeof (uint_t), 0);
		break;

	case SBIOCSSNAP:
		if (iocp->ioc_count == TRANSPARENT) {
			mcopyin(mp, NULL, sizeof (uint_t), NULL);
			qreply(wq, mp);
		} else {
			/*
			 * Verify argument length.
			 */
			error = miocpullup(mp, sizeof (uint_t));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}

			/*
			 * If chunking, don't worry about the effects of
			 * snipping the message size on stream head flow
			 * control, since it has a relatively small bearing
			 * on the data rate onto the stream head.
			 */
			if (!sbp->sb_chunk) {
				/*
				 * Set up hi/lo water marks on stream
				 * head read queue.  Unlikely to run out
				 * of resources.  Fix at later date.
				 */
				if ((mop = allocb(sizeof (struct stroptions),
				    BPRI_MED)) != NULL) {
					struct stroptions *sop;
					uint_t snap;
					int fudge;

					snap = *(uint_t *)mp->b_cont->b_rptr;
					mop->b_datap->db_type = M_SETOPTS;
					mop->b_wptr += sizeof (*sop);
					sop = (struct stroptions *)mop->b_rptr;
					sop->so_flags = SO_HIWAT | SO_LOWAT;
					fudge = (snap <= 100) ? 4 :
					    (snap <= 400) ? 2 : 1;
					sop->so_hiwat = SNIT_HIWAT(snap, fudge);
					sop->so_lowat = SNIT_LOWAT(snap, fudge);
					qreply(wq, mop);
				}
			}

			sbp->sb_snap = *(uint_t *)mp->b_cont->b_rptr;

			miocack(wq, mp, 0, 0);
		}
		break;

	case SBIOCGSNAP:
		/*
		 * Verify argument length.
		 */
		if (transparent != TRANSPARENT) {
			error = miocpullup(mp, sizeof (uint_t));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}
		}

		if (transparent == TRANSPARENT) {
			datamp = allocb(sizeof (uint_t), BPRI_MED);
			if (datamp == NULL) {
				miocnak(wq, mp, 0, EAGAIN);
				break;
			}
			mcopyout(mp, NULL, sizeof (uint_t), NULL, datamp);
		}

		*(uint_t *)mp->b_cont->b_rptr = sbp->sb_snap;

		if (transparent == TRANSPARENT)
			qreply(wq, mp);
		else
			miocack(wq, mp, sizeof (uint_t), 0);
		break;

	case SBIOCSFLAGS:
		/*
		 * Set the flags.
		 */
		if (iocp->ioc_count == TRANSPARENT) {
			mcopyin(mp, NULL, sizeof (uint_t), NULL);
			qreply(wq, mp);
		} else {
			error = miocpullup(mp, sizeof (uint_t));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}
			sbp->sb_flags = *(uint_t *)mp->b_cont->b_rptr;
			miocack(wq, mp, 0, 0);
		}
		break;

	case SBIOCGFLAGS:
		/*
		 * Verify argument length.
		 */
		if (transparent != TRANSPARENT) {
			error = miocpullup(mp, sizeof (uint_t));
			if (error != 0) {
				miocnak(wq, mp, 0, error);
				break;
			}
		}

		if (transparent == TRANSPARENT) {
			datamp = allocb(sizeof (uint_t), BPRI_MED);
			if (datamp == NULL) {
				miocnak(wq, mp, 0, EAGAIN);
				break;
			}
			mcopyout(mp, NULL, sizeof (uint_t), NULL, datamp);
		}

		*(uint_t *)mp->b_cont->b_rptr = sbp->sb_flags;

		if (transparent == TRANSPARENT)
			qreply(wq, mp);
		else
			miocack(wq, mp, sizeof (uint_t), 0);
		break;

	default:
		putnext(wq, mp);
		break;
	}
}

/*
 * Given a length l, calculate the amount of extra storage
 * required to round it up to the next multiple of the alignment a.
 */
#define	RoundUpAmt(l, a)	((l) % (a) ? (a) - ((l) % (a)) : 0)
/*
 * Calculate additional amount of space required for alignment.
 */
#define	Align(l)		RoundUpAmt(l, sizeof (ulong_t))
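/*
 * For example, on a kernel where sizeof (ulong_t) is 8, Align(13)
 * evaluates to 8 - (13 % 8) = 3, so a 13-byte message is padded out
 * to a 16-byte boundary.
 */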
/*
 * Smallest possible message size when headers are enabled.
 * This is used to calculate whether a chunk is nearly full.
 */
#define	SMALLEST_MESSAGE	sizeof (struct sb_hdr) + _POINTER_ALIGNMENT

/*
 * Process a read-side M_DATA message.
 *
 * If the currently accumulating chunk doesn't have enough room
 * for the message, close off the chunk, pass it upward, and start
 * a new one.  Then add the message to the current chunk, taking
 * account of the possibility that the message's size exceeds the
 * chunk size.
 *
 * If headers are enabled add an sb_hdr header and trailing alignment padding.
 *
 * To optimise performance the total number of mblks should be kept
 * to a minimum.  This is achieved by using any remaining space in message N
 * for both its own padding and the header of message N+1 if possible.
 * If there's insufficient space we allocate one message to hold this 'wrapper'.
 * (There's likely to be space beyond message N, since allocb would have
 * rounded up the required size to one of the dblk_sizes.)
 */
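/*
 * Rough sketch of a chunk built with headers enabled (each sb_hdr
 * starts on a pointer-aligned boundary within the chunk):
 *
 *   +--------+-----------+-----+--------+-----------+-----+----
 *   | sb_hdr | msg 1     | pad | sb_hdr | msg 2     | pad | ...
 *   +--------+-----------+-----+--------+-----------+-----+----
 */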
static void
sbaddmsg(queue_t *rq, mblk_t *mp)
{
	struct sb	*sbp;
	struct timeval	t;
	struct sb_hdr	hp;
	mblk_t	*wrapper;	/* padding for msg N, header for msg N+1 */
	mblk_t	*last;		/* last mblk of current message */
	size_t	wrapperlen;	/* length of header + padding */
	size_t	origlen;	/* data length before truncation */
	size_t	pad;		/* bytes required to align header */

	sbp = (struct sb *)rq->q_ptr;

	origlen = msgdsize(mp);

	/*
	 * Truncate the message.
	 */
	if ((sbp->sb_snap > 0) && (origlen > sbp->sb_snap) &&
	    (adjmsg(mp, -(origlen - sbp->sb_snap)) == 1))
		hp.sbh_totlen = hp.sbh_msglen = sbp->sb_snap;
	else
		hp.sbh_totlen = hp.sbh_msglen = origlen;

	if (sbp->sb_flags & SB_NO_HEADER) {

		/*
		 * Would the inclusion of this message overflow the current
		 * chunk?  If so close the chunk off and start a new one.
		 */
		if ((hp.sbh_totlen + sbp->sb_mlen) > sbp->sb_chunk)
			sbclosechunk(sbp);
		/*
		 * First message too big for chunk - just send it up.
		 * This will always be true when we're not chunking.
		 */
		if (hp.sbh_totlen > sbp->sb_chunk) {
			sbsendit(rq, mp);
			return;
		}

		/*
		 * We now know that the msg will fit in the chunk.
		 * Link it onto the end of the chunk.
		 * Since linkb() walks the entire chain, we keep a pointer to
		 * the first mblk of the last message added and call linkb()
		 * on that last message, rather than performing the
		 * O(n) linkb() operation on the whole chain.
		 * sb_head isn't needed in this SB_NO_HEADER mode.
		 */
		if (sbp->sb_mp)
			linkb(sbp->sb_tail, mp);
		else
			sbp->sb_mp = mp;

		sbp->sb_tail = mp;
		sbp->sb_mlen += hp.sbh_totlen;
		sbp->sb_mcount++;
	} else {
		/* Timestamp must be done immediately */
		uniqtime(&t);
		TIMEVAL_TO_TIMEVAL32(&hp.sbh_timestamp, &t);

		pad = Align(hp.sbh_totlen);
		hp.sbh_totlen += sizeof (hp);
		hp.sbh_totlen += pad;

		/*
		 * Would the inclusion of this message overflow the current
		 * chunk?  If so close the chunk off and start a new one.
		 */
		if ((hp.sbh_totlen + sbp->sb_mlen) > sbp->sb_chunk)
			sbclosechunk(sbp);

		if (sbp->sb_head == NULL) {
			/* Allocate leading header of new chunk */
			sbp->sb_head = allocb(sizeof (hp), BPRI_MED);
			if (sbp->sb_head == NULL) {
				/*
				 * Memory allocation failure.
				 * This will need to be revisited
				 * since using certain flag combinations
				 * can result in messages being dropped
				 * silently.
				 */
				freemsg(mp);
				sbp->sb_drops++;
				return;
			}
			sbp->sb_mp = sbp->sb_head;
		}

		/*
		 * Copy header into message
		 */
		hp.sbh_drops = sbp->sb_drops;
		hp.sbh_origlen = origlen;
		(void) memcpy(sbp->sb_head->b_wptr, (char *)&hp, sizeof (hp));
		sbp->sb_head->b_wptr += sizeof (hp);

		ASSERT(sbp->sb_head->b_wptr <= sbp->sb_head->b_datap->db_lim);

		/*
		 * Join message to the chunk
		 */
		linkb(sbp->sb_head, mp);

		sbp->sb_mcount++;
		sbp->sb_mlen += hp.sbh_totlen;

		/*
		 * If the first message alone is too big for the chunk close
		 * the chunk now.
		 * If the next message would immediately cause the chunk to
		 * overflow we may as well close the chunk now.  The next
		 * message is certain to be at least SMALLEST_MESSAGE size.
		 */
		if (hp.sbh_totlen + SMALLEST_MESSAGE > sbp->sb_chunk) {
			sbclosechunk(sbp);
			return;
		}

		/*
		 * Find space for the wrapper. The wrapper consists of:
		 *
		 * 1) Padding for this message (this is to ensure each header
		 * begins on an 8 byte boundary in the userland buffer).
		 *
		 * 2) Space for the next message's header, in case the next
		 * message will fit in this chunk.
		 *
		 * It may be possible to append the wrapper to the last mblk
		 * of the message, but only if we 'own' the data. If the dblk
		 * has been shared through dupmsg() we mustn't alter it.
		 */

		wrapperlen = (sizeof (hp) + pad);

		/* Is there space for the wrapper beyond the message's data? */
		for (last = mp; last->b_cont; last = last->b_cont)
			;

		if ((wrapperlen <= MBLKTAIL(last)) &&
		    (last->b_datap->db_ref == 1)) {
			if (pad > 0) {
				/*
				 * Pad with zeroes to the next pointer boundary
				 * (we don't want to disclose kernel data to
				 * users), then advance wptr.
				 */
				(void) memset(last->b_wptr, 0, pad);
				last->b_wptr += pad;
			}
			/* Remember where to write the header information */
			sbp->sb_head = last;
		} else {
			/* Have to allocate additional space for the wrapper */
			wrapper = allocb(wrapperlen, BPRI_MED);
			if (wrapper == NULL) {
				sbclosechunk(sbp);
				return;
			}
			if (pad > 0) {
				/*
				 * Pad with zeroes (we don't want to disclose
				 * kernel data to users).
				 */
				(void) memset(wrapper->b_wptr, 0, pad);
				wrapper->b_wptr += pad;
			}
			/* Link the wrapper msg onto the end of the chunk */
			linkb(mp, wrapper);
			/* Remember to write the next header in this wrapper */
			sbp->sb_head = wrapper;
		}
	}
}

/*
 * Called from timeout().
 * Signal a timeout by passing a zero-length M_CTL msg up the read side
 * to synchronize with any active module threads (open, close, wput, rput).
 */
static void
sbtick(void *arg)
{
	struct sb *sbp = arg;
	queue_t *rq;

	ASSERT(sbp);

	rq = sbp->sb_rq;
	sbp->sb_timeoutid = 0;		/* timeout has fired */

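	/*
	 * putctl() fails only if it cannot allocate the M_CTL message;
	 * in that case rearm the timer and retry on the next tick.
	 */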
	if (putctl(rq, M_CTL) == 0)	/* failure */
		sbp->sb_timeoutid = qtimeout(rq, sbtick, sbp, sbp->sb_ticks);
}

/*
 * Close off the currently accumulating chunk and pass
 * it upward. Takes care of resetting timers as well.
 *
 * This routine is called both directly and as a result
 * of the chunk timeout expiring.
 */
static void
sbclosechunk(struct sb *sbp)
{
	mblk_t *mp;
	queue_t *rq;

	ASSERT(sbp);

	if (sbp->sb_timeoutid) {
		(void) quntimeout(sbp->sb_rq, sbp->sb_timeoutid);
		sbp->sb_timeoutid = 0;
	}

	mp = sbp->sb_mp;
	rq = sbp->sb_rq;

	/*
	 * If there's currently a chunk in progress, close it off
	 * and try to send it up.
	 */
	if (mp) {
		sbsendit(rq, mp);
	}

	/*
	 * Clear old chunk. Ready for new msgs.
	 */
	sbp->sb_tail = sbp->sb_mp = sbp->sb_head = NULL;
	sbp->sb_mlen = 0;
	sbp->sb_mcount = 0;
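	/*
	 * When chunking is deferred, clear the "first message received"
	 * state so the next chunk starts afresh.
	 */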
	if (sbp->sb_flags & SB_DEFER_CHUNK)
		sbp->sb_state &= ~SB_FRCVD;
}

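/*
 * Send a completed chunk up the read side, respecting flow control.
 * If the stream above cannot accept it, the chunk is queued when
 * SB_NO_DROPS is set; otherwise it is freed and sb_drops is charged
 * with the number of messages lost.
 */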
static void
sbsendit(queue_t *rq, mblk_t *mp)
{
	struct sb *sbp = (struct sb *)rq->q_ptr;

	if (!canputnext(rq)) {
		if (sbp->sb_flags & SB_NO_DROPS)
			(void) putq(rq, mp);
		else {
			freemsg(mp);
			sbp->sb_drops += sbp->sb_mcount;
		}
		return;
	}
	/*
	 * If there are messages on the q already, keep
	 * queueing them since they need to be processed in order.
	 */
	if (qsize(rq) > 0) {
		/* should only get here if SB_NO_DROPS */
		(void) putq(rq, mp);
	} else
		putnext(rq, mp);
}