1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 /* All Rights Reserved */
27
28
29 /*
30 * Transport Interface Library cooperating module - issue 2
31 */
32
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/stream.h>
36 #include <sys/stropts.h>
37 #include <sys/strsubr.h>
38 #define _SUN_TPI_VERSION 2
39 #include <sys/tihdr.h>
40 #include <sys/timod.h>
41 #include <sys/suntpi.h>
42 #include <sys/debug.h>
43 #include <sys/strlog.h>
44 #include <sys/errno.h>
45 #include <sys/cred.h>
46 #include <sys/cmn_err.h>
47 #include <sys/kmem.h>
48 #include <sys/sysmacros.h>
49 #include <sys/ddi.h>
50 #include <sys/sunddi.h>
51 #include <sys/strsun.h>
52 #include <c2/audit.h>
53
54 /*
55 * This is the loadable module wrapper.
56 */
57 #include <sys/conf.h>
58 #include <sys/modctl.h>
59
60 static struct streamtab timinfo;
61
62 static struct fmodsw fsw = {
63 "timod",
64 &timinfo,
65 D_MTQPAIR | D_MP,
66 };
67
68 /*
69 * Module linkage information for the kernel.
70 */
71
72 static struct modlstrmod modlstrmod = {
73 &mod_strmodops, "transport interface str mod", &fsw
74 };
75
76 static struct modlinkage modlinkage = {
77 MODREV_1, &modlstrmod, NULL
78 };
79
80 static krwlock_t tim_list_rwlock;
81
82 /*
83 * This module keeps track of the capabilities of the underlying transport.
84 * The information persists across module invocations (open/close). Currently
85 * it remembers whether the underlying transport supports the
86 * TI_GET{MY,PEER}NAME ioctls and the T_CAPABILITY_REQ message. This module
87 * either passes these ioctls/messages to the transport or emulates them when
88 * the transport doesn't understand them.
89 *
90 * The transport is assumed to support T_CAPABILITY_REQ once timod receives a
91 * T_CAPABILITY_ACK from it. There is no current standard describing transport
92 * behaviour on receipt of an unknown message type, so the following reactions
93 * are expected and handled:
94 *
95 * 1) The transport drops the unknown T_CAPABILITY_REQ message. In this case
96 * timod waits for tim_tcap_wait seconds and then assumes that the transport
97 * doesn't provide this message type. T_CAPABILITY_REQ should never travel
98 * over the wire, so the timeout value only needs to take internal processing
99 * time for the message into consideration. From the user's standpoint this
100 * means an application may hang in the kernel for tim_tcap_wait seconds the
101 * first time this message is used with a particular transport (e.g. TCP/IP)
102 * after boot.
103 *
104 * 2) The transport responds with T_ERROR_ACK specifying T_CAPABILITY_REQ as
105 * the original message type. In this case it is assumed that the transport
106 * doesn't support it (which may not always be true - some transports return
107 * T_ERROR_ACK in other cases, such as lack of system memory).
108 *
109 * 3) The transport responds with M_ERROR, effectively shutting down the
110 * stream. Unfortunately there is no standard way to pass the reason for the
111 * M_ERROR message back to the caller, so it is assumed that if M_ERROR was
112 * sent in response to a T_CAPABILITY_REQ message, the transport doesn't
113 * support it.
114 *
115 * It is possible under certain circumstances that timod will incorrectly
116 * assume that the underlying transport doesn't provide T_CAPABILITY_REQ. In
117 * this worst-case scenario timod emulates the functionality itself and
118 * provides only the TC1_INFO capability; all other bits in the CAP_bits1
119 * field are cleared. TC1_INFO is emulated by sending T_INFO_REQ down to the transport provider.
120 */
121
122 /*
123 * Notes about locking:
124 *
125 * tim_list_rwlock protects the list of tim_tim structures itself. When this
126 * lock is held, the list itself is stable, but the contents of the entries
127 * themselves might not be.
128 *
129 * The rest of the members are generally protected by D_MTQPAIR, which
130 * specifies a default exclusive inner perimeter. If you're looking at
131 * q->q_ptr, then it's stable.
132 *
133 * There's one exception to this rule: tim_peer{maxlen,len,name}. These members
134 * are touched without entering the associated STREAMS perimeter because we
135 * get the pointer via tim_findlink() rather than q_ptr. These are protected
136 * by tim_mutex instead. If you don't hold that lock, don't look at them.
137 *
138 * (It would be possible to separate out the 'set by T_CONN_RES' cases from the
139 * others, but there appears to be no reason to do so.)
140 */
141 struct tim_tim {
142 uint32_t tim_flags;
143 t_uscalar_t tim_backlog;
144 mblk_t *tim_iocsave;
145 t_scalar_t tim_mymaxlen;
146 t_scalar_t tim_mylen;
147 caddr_t tim_myname;
148 t_scalar_t tim_peermaxlen;
149 t_scalar_t tim_peerlen;
150 caddr_t tim_peername;
151 cred_t *tim_peercred;
152 mblk_t *tim_consave;
153 bufcall_id_t tim_wbufcid;
154 bufcall_id_t tim_rbufcid;
155 timeout_id_t tim_wtimoutid;
156 timeout_id_t tim_rtimoutid;
157 /* Protected by the global tim_list_rwlock for all instances */
158 struct tim_tim *tim_next;
159 struct tim_tim **tim_ptpn;
160 t_uscalar_t tim_acceptor;
161 t_scalar_t tim_saved_prim; /* Primitive from message */
162 /* part of ioctl. */
163 timeout_id_t tim_tcap_timoutid; /* For T_CAP_REQ timeout */
164 tpi_provinfo_t *tim_provinfo; /* Transport description */
165 kmutex_t tim_mutex; /* protect tim_peer* */
166 pid_t tim_cpid;
167 };
168
169
170 /*
171 * Local flags used with the tim_flags field in the instance structure of
172 * type 'struct tim_tim' declared above.
173 * Historical note:
174 * These constants were previously declared in a very messed-up
175 * namespace in timod.h.
176 *
177 * A transport may be in one of 3 states:
178 *
179 * 1) It provides T_CAPABILITY_REQ
180 * 2) It does not provide T_CAPABILITY_REQ
181 * 3) It is not yet known whether the transport provides T_CAPABILITY_REQ.
182 *
183 * It is assumed that the underlying transport either provides
184 * T_CAPABILITY_REQ or not, and that this does not change during the
185 * system's lifetime.
186 *
187 */
188 #define PEEK_RDQ_EXPIND 0x0001 /* look for expinds on stream rd queues */
189 #define WAITIOCACK 0x0002 /* waiting for info for ioctl act */
190 #define CLTS 0x0004 /* connectionless transport */
191 #define COTS 0x0008 /* connection-oriented transport */
192 #define CONNWAIT 0x0010 /* waiting for connect confirmation */
193 #define LOCORDREL 0x0020 /* local end has orderly released */
194 #define REMORDREL 0x0040 /* remote end has orderly released */
195 #define NAMEPROC 0x0080 /* processing a NAME ioctl */
196 #define DO_MYNAME 0x0100 /* timod handles TI_GETMYNAME */
197 #define DO_PEERNAME 0x0200 /* timod handles TI_GETPEERNAME */
198 #define TI_CAP_RECVD 0x0400 /* TI_CAPABILITY received */
199 #define CAP_WANTS_INFO 0x0800 /* TI_CAPABILITY has TC1_INFO set */
200 #define WAIT_IOCINFOACK 0x1000 /* T_INFO_REQ generated from ioctl */
201 #define WAIT_CONNRESACK 0x2000 /* waiting for T_OK_ACK to T_CONN_RES */
202
203
204 /* Debugging facilities */
205 /*
206 * Logging needed for debugging timod should only appear in DEBUG kernel.
207 */
208 #ifdef DEBUG
209 #define TILOG(msg, arg) tilog((msg), (arg))
210 #define TILOGP(msg, arg) tilogp((msg), (arg))
211 #else
212 #define TILOG(msg, arg)
213 #define TILOGP(msg, arg)
214 #endif
215
216
217 /*
218 * Sleep timeout for T_CAPABILITY_REQ. This message never travels across the
219 * network, so the timeout value only needs to be large enough to cover
220 * internal processing time.
221 */
222 clock_t tim_tcap_wait = 2;
223
224 /* Sleep timeout in tim_recover() */
225 #define TIMWAIT (1*hz)
226 /* Sleep timeout in tim_ioctl_retry() 0.2 seconds */
227 #define TIMIOCWAIT (200*hz/1000)
228
229 /*
230 * Return values for ti_doname().
231 */
232 #define DONAME_FAIL 0 /* failing ioctl (done) */
233 #define DONAME_DONE 1 /* done processing */
234 #define DONAME_CONT 2 /* continue processing (not done yet) */
235
236 /*
237 * Function prototypes
238 */
239 static int ti_doname(queue_t *, mblk_t *);
240 static int ti_expind_on_rdqueues(queue_t *);
241 static void tim_ioctl_send_reply(queue_t *, mblk_t *, mblk_t *);
242 static void tim_send_ioc_error_ack(queue_t *, struct tim_tim *, mblk_t *);
243 static void tim_tcap_timer(void *);
244 static void tim_tcap_genreply(queue_t *, struct tim_tim *);
245 static void tim_send_reply(queue_t *, mblk_t *, struct tim_tim *, t_scalar_t);
246 static void tim_answer_ti_sync(queue_t *, mblk_t *, struct tim_tim *,
247 mblk_t *, uint32_t);
248 static void tim_send_ioctl_tpi_msg(queue_t *, mblk_t *, struct tim_tim *,
249 struct iocblk *);
250 static void tim_clear_peer(struct tim_tim *);
251
252 int
253 _init(void)
254 {
255 int error;
256
257 rw_init(&tim_list_rwlock, NULL, RW_DRIVER, NULL);
258 error = mod_install(&modlinkage);
259 if (error != 0) {
260 rw_destroy(&tim_list_rwlock);
261 return (error);
262 }
263
264 return (0);
265 }
266
267 int
268 _fini(void)
269 {
270 int error;
271
272 error = mod_remove(&modlinkage);
273 if (error != 0)
274 return (error);
275 rw_destroy(&tim_list_rwlock);
276 return (0);
277 }
278
279 int
280 _info(struct modinfo *modinfop)
281 {
282 return (mod_info(&modlinkage, modinfop));
283 }
284
285
286 /*
287 * Hash list for all instances. Used to find tim_tim structure based on
288 * ACCEPTOR_id in T_CONN_RES. Protected by tim_list_rwlock.
289 */
290 #define TIM_HASH_SIZE 256
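/*
 * Under _ILP32 the acceptor id is a queue pointer (see timodopen()), so the
 * low-order bits are discarded before hashing for a better spread; under
 * LP64 it is the minor number and is hashed directly.
 */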
291 #ifdef _ILP32
292 #define TIM_HASH(id) (((uintptr_t)(id) >> 8) % TIM_HASH_SIZE)
293 #else
294 #define TIM_HASH(id) ((uintptr_t)(id) % TIM_HASH_SIZE)
295 #endif /* _ILP32 */
296 static struct tim_tim *tim_hash[TIM_HASH_SIZE];
297 int tim_cnt = 0;
298
299 static void tilog(char *, t_scalar_t);
300 static void tilogp(char *, uintptr_t);
301 static mblk_t *tim_filladdr(queue_t *, mblk_t *, boolean_t);
302 static void tim_addlink(struct tim_tim *);
303 static void tim_dellink(struct tim_tim *);
304 static struct tim_tim *tim_findlink(t_uscalar_t);
305 static void tim_recover(queue_t *, mblk_t *, t_scalar_t);
306 static void tim_ioctl_retry(queue_t *);
307
308 int dotilog = 0;
309
310 #define TIMOD_ID 3
311
312 static int timodopen(queue_t *, dev_t *, int, int, cred_t *);
313 static int timodclose(queue_t *, int, cred_t *);
314 static int timodwput(queue_t *, mblk_t *);
315 static int timodrput(queue_t *, mblk_t *);
316 static int timodrsrv(queue_t *);
317 static int timodwsrv(queue_t *);
318 static int timodrproc(queue_t *, mblk_t *);
319 static int timodwproc(queue_t *, mblk_t *);
320
321 /* stream data structure definitions */
322
323 static struct module_info timod_info =
324 {TIMOD_ID, "timod", 0, INFPSZ, 512, 128};
325 static struct qinit timodrinit = {
326 timodrput,
327 timodrsrv,
328 timodopen,
329 timodclose,
330 nulldev,
331 &timod_info,
332 NULL
333 };
334 static struct qinit timodwinit = {
335 timodwput,
336 timodwsrv,
337 timodopen,
338 timodclose,
339 nulldev,
340 &timod_info,
341 NULL
342 };
343 static struct streamtab timinfo = { &timodrinit, &timodwinit, NULL, NULL };
344
345 /*
346 * timodopen - open routine gets called when the module gets pushed
347 * onto the stream.
348 */
349 /*ARGSUSED*/
350 static int
351 timodopen(
352 queue_t *q,
353 dev_t *devp,
354 int flag,
355 int sflag,
356 cred_t *crp)
357 {
358 struct tim_tim *tp;
359 struct stroptions *sop;
360 mblk_t *bp;
361
362 ASSERT(q != NULL);
363
364 if (q->q_ptr) {
365 return (0);
366 }
367
368 if ((bp = allocb(sizeof (struct stroptions), BPRI_MED)) == 0)
369 return (ENOMEM);
370
371 tp = kmem_zalloc(sizeof (struct tim_tim), KM_SLEEP);
372
373 tp->tim_cpid = -1;
374 tp->tim_saved_prim = -1;
375
376 mutex_init(&tp->tim_mutex, NULL, MUTEX_DEFAULT, NULL);
377
378 q->q_ptr = (caddr_t)tp;
379 WR(q)->q_ptr = (caddr_t)tp;
380
381 tilogp("timodopen: Allocated for tp %lx\n", (uintptr_t)tp);
382 tilogp("timodopen: Allocated for q %lx\n", (uintptr_t)q);
383
384 /* Must be done before tpi_findprov and _ILP32 q_next walk below */
385 qprocson(q);
386
387 tp->tim_provinfo = tpi_findprov(q);
388
389 /*
390 * Defer allocation of the buffers for the local address and
391 * the peer's address until we need them.
392 * Assume that timod has to handle getname until we hear
393 * an iocack from the transport provider or we know that
394 * the transport provider doesn't understand it.
395 */
396 if (tp->tim_provinfo->tpi_myname != PI_YES) {
397 TILOG("timodopen: setting DO_MYNAME\n", 0);
398 tp->tim_flags |= DO_MYNAME;
399 }
400
401 if (tp->tim_provinfo->tpi_peername != PI_YES) {
402 TILOG("timodopen: setting DO_PEERNAME\n", 0);
403 tp->tim_flags |= DO_PEERNAME;
404 }
405
406 #ifdef _ILP32
407 {
408 queue_t *driverq;
409
410 /*
411 * Find my driver's read queue (for T_CONN_RES handling)
412 */
413 driverq = WR(q);
414 while (SAMESTR(driverq))
415 driverq = driverq->q_next;
416
417 tp->tim_acceptor = (t_uscalar_t)RD(driverq);
418 }
419 #else
420 tp->tim_acceptor = (t_uscalar_t)getminor(*devp);
421 #endif /* _ILP32 */
422
423 /*
424 * Add this one to the list.
425 */
426 tim_addlink(tp);
427
428 /*
429 * Send M_SETOPTS to stream head to make sure M_PCPROTO messages
430 * are not flushed. This prevents application deadlocks.
431 */
432 bp->b_datap->db_type = M_SETOPTS;
433 bp->b_wptr += sizeof (struct stroptions);
434 sop = (struct stroptions *)bp->b_rptr;
435 sop->so_flags = SO_READOPT;
436 sop->so_readopt = RFLUSHPCPROT;
437
438 putnext(q, bp);
439
440 return (0);
441 }
442
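/*
 * qtimeout() callback: clear the pending read- or write-side timeout id and
 * re-enable the queue so that its service procedure runs again and can retry
 * the deferred work.
 */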
443 static void
444 tim_timer(void *arg)
445 {
446 queue_t *q = arg;
447 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
448
449 ASSERT(tp);
450
451 if (q->q_flag & QREADR) {
452 ASSERT(tp->tim_rtimoutid);
453 tp->tim_rtimoutid = 0;
454 } else {
455 ASSERT(tp->tim_wtimoutid);
456 tp->tim_wtimoutid = 0;
457 }
458 enableok(q);
459 qenable(q);
460 }
461
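/*
 * qbufcall() callback: clear the pending read- or write-side bufcall id and
 * re-enable the queue so its service procedure can retry the allocation that
 * previously failed.
 */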
462 static void
463 tim_buffer(void *arg)
464 {
465 queue_t *q = arg;
466 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
467
468 ASSERT(tp);
469
470 if (q->q_flag & QREADR) {
471 ASSERT(tp->tim_rbufcid);
472 tp->tim_rbufcid = 0;
473 } else {
474 ASSERT(tp->tim_wbufcid);
475 tp->tim_wbufcid = 0;
476 }
477 enableok(q);
478 qenable(q);
479 }
480
481 /*
482 * timodclose - This routine gets called when the module gets popped
483 * off of the stream.
484 */
485 /*ARGSUSED*/
486 static int
487 timodclose(
488 queue_t *q,
489 int flag,
490 cred_t *crp)
491 {
492 struct tim_tim *tp;
493 mblk_t *mp;
494 mblk_t *nmp;
495
496 ASSERT(q != NULL);
497
498 tp = (struct tim_tim *)q->q_ptr;
499 q->q_ptr = NULL;
500
501 ASSERT(tp != NULL);
502
503 tilogp("timodclose: Entered for tp %lx\n", (uintptr_t)tp);
504 tilogp("timodclose: Entered for q %lx\n", (uintptr_t)q);
505
506 qprocsoff(q);
507 tim_dellink(tp);
508
509 /*
510 * Cancel any outstanding bufcall
511 * or timeout requests.
512 */
513 if (tp->tim_wbufcid) {
514 qunbufcall(q, tp->tim_wbufcid);
515 tp->tim_wbufcid = 0;
516 }
517 if (tp->tim_rbufcid) {
518 qunbufcall(q, tp->tim_rbufcid);
519 tp->tim_rbufcid = 0;
520 }
521 if (tp->tim_wtimoutid) {
522 (void) quntimeout(q, tp->tim_wtimoutid);
523 tp->tim_wtimoutid = 0;
524 }
525 if (tp->tim_rtimoutid) {
526 (void) quntimeout(q, tp->tim_rtimoutid);
527 tp->tim_rtimoutid = 0;
528 }
529
530 if (tp->tim_tcap_timoutid != 0) {
531 (void) quntimeout(q, tp->tim_tcap_timoutid);
532 tp->tim_tcap_timoutid = 0;
533 }
534
535 if (tp->tim_iocsave != NULL)
536 freemsg(tp->tim_iocsave);
537 mp = tp->tim_consave;
538 while (mp) {
539 nmp = mp->b_next;
540 mp->b_next = NULL;
541 freemsg(mp);
542 mp = nmp;
543 }
544 ASSERT(tp->tim_mymaxlen >= 0);
545 if (tp->tim_mymaxlen != 0)
546 kmem_free(tp->tim_myname, (size_t)tp->tim_mymaxlen);
547 ASSERT(tp->tim_peermaxlen >= 0);
548 if (tp->tim_peermaxlen != 0)
549 kmem_free(tp->tim_peername, (size_t)tp->tim_peermaxlen);
550
551 q->q_ptr = WR(q)->q_ptr = NULL;
552
553 mutex_destroy(&tp->tim_mutex);
554
555 if (tp->tim_peercred != NULL)
556 crfree(tp->tim_peercred);
557
558 kmem_free(tp, sizeof (struct tim_tim));
559
560 return (0);
561 }
562
563 /*
564 * timodrput - Module read put procedure. This is called from
565 * the module, driver, or stream head upstream/downstream.
566 * Handles M_FLUSH, M_DATA and some M_PROTO (T_DATA_IND,
567 * and T_UNITDATA_IND) messages. All others are queued to
568 * be handled by the service procedures.
569 */
570 static int
571 timodrput(queue_t *q, mblk_t *mp)
572 {
573 union T_primitives *pptr;
574
575 /*
576 * During flow control and other instances when messages
577 * are already on the queue, queue up a non-high-priority message.
578 */
579 if (q->q_first != 0 && mp->b_datap->db_type < QPCTL) {
580 (void) putq(q, mp);
581 return (0);
582 }
583
584 /*
585 * Inline processing of data (to avoid additional procedure call).
586 * Rest is handled in timodrproc.
587 */
588
589 switch (mp->b_datap->db_type) {
590 case M_DATA:
591 if (bcanputnext(q, mp->b_band))
592 putnext(q, mp);
593 else
594 (void) putq(q, mp);
595 break;
596 case M_PROTO:
597 case M_PCPROTO:
598 if (MBLKL(mp) < sizeof (t_scalar_t)) {
599 if (mp->b_datap->db_type == M_PCPROTO ||
600 bcanputnext(q, mp->b_band)) {
601 putnext(q, mp);
602 } else {
603 (void) putq(q, mp);
604 }
605 break;
606 }
607 pptr = (union T_primitives *)mp->b_rptr;
608 switch (pptr->type) {
609 case T_EXDATA_IND:
610 case T_DATA_IND:
611 case T_UNITDATA_IND:
612 if (bcanputnext(q, mp->b_band))
613 putnext(q, mp);
614 else
615 (void) putq(q, mp);
616 break;
617 default:
618 (void) timodrproc(q, mp);
619 break;
620 }
621 break;
622 default:
623 (void) timodrproc(q, mp);
624 break;
625 }
626 return (0);
627 }
628
629 /*
630 * timodrsrv - Module read queue service procedure. This is called when
631 * messages are placed on an empty queue, when high priority
632 * messages are placed on the queue, and when flow control
633 * restrictions subside. This code used to be included in a
634 * put procedure, but it was moved to a service procedure
635 * because several points were added where memory allocation
636 * could fail, and there is no reasonable recovery mechanism
637 * from the put procedure.
638 */
639 /*ARGSUSED*/
640 static int
641 timodrsrv(queue_t *q)
642 {
643 mblk_t *mp;
644 struct tim_tim *tp;
645
646 ASSERT(q != NULL);
647
648 tp = (struct tim_tim *)q->q_ptr;
649 if (!tp)
650 return (0);
651
652 while ((mp = getq(q)) != NULL) {
653 if (timodrproc(q, mp)) {
654 /*
655 * timodrproc did a putbq - stop processing
656 * messages.
657 */
658 return (0);
659 }
660 }
661 return (0);
662 }
663
664 /*
665 * Perform common processing when a T_CAPABILITY_ACK or T_INFO_ACK
666 * arrives. Set the queue properties and adjust the tim_flags according
667 * to the service type.
668 */
669 static void
670 timodprocessinfo(queue_t *q, struct tim_tim *tp, struct T_info_ack *tia)
671 {
672 TILOG("timodprocessinfo: strqset(%d)\n", tia->TIDU_size);
673 (void) strqset(q, QMAXPSZ, 0, tia->TIDU_size);
674 (void) strqset(OTHERQ(q), QMAXPSZ, 0, tia->TIDU_size);
675
676 if ((tia->SERV_type == T_COTS) || (tia->SERV_type == T_COTS_ORD))
677 tp->tim_flags = (tp->tim_flags & ~CLTS) | COTS;
678 else if (tia->SERV_type == T_CLTS)
679 tp->tim_flags = (tp->tim_flags & ~COTS) | CLTS;
680 }
681
682 static int
683 timodrproc(queue_t *q, mblk_t *mp)
684 {
685 uint32_t auditing = AU_AUDITING();
686 union T_primitives *pptr;
687 struct tim_tim *tp;
688 struct iocblk *iocbp;
689 mblk_t *nbp;
690 size_t blen;
691
692 tp = (struct tim_tim *)q->q_ptr;
693
694 switch (mp->b_datap->db_type) {
695 default:
696 putnext(q, mp);
697 break;
698
699 case M_ERROR:
700 TILOG("timodrproc: Got M_ERROR, flags = %x\n", tp->tim_flags);
701 /*
702 * There is no specified standard response for a driver that
703 * receives an unknown message type, and M_ERROR is one
704 * possibility. If we send T_CAPABILITY_REQ down and the transport
705 * provider responds with M_ERROR, we assume that it doesn't
706 * understand this message type. This assumption may sometimes
707 * be incorrect (the transport may reply with M_ERROR for
708 * some other reason), but there is no way for us to distinguish
709 * between the different cases. In the worst case timod, and everyone
710 * else sharing the global transport description with it, may end up
711 * emulating T_CAPABILITY_REQ.
712 */
713
714 /*
715 * Check that we are waiting for T_CAPABILITY_ACK and that it is
716 * not yet known whether T_CAPABILITY_REQ is implemented by the
717 * transport or emulated by timod.
718 */
719 if ((tp->tim_provinfo->tpi_capability == PI_DONTKNOW) &&
720 ((tp->tim_flags & TI_CAP_RECVD) != 0)) {
721 /*
722 * Good chances that this transport doesn't provide
723 * T_CAPABILITY_REQ. Mark this information permanently
724 * for the module + transport combination.
725 */
726 PI_PROVLOCK(tp->tim_provinfo);
727 if (tp->tim_provinfo->tpi_capability == PI_DONTKNOW)
728 tp->tim_provinfo->tpi_capability = PI_NO;
729 PI_PROVUNLOCK(tp->tim_provinfo);
730 if (tp->tim_tcap_timoutid != 0) {
731 (void) quntimeout(q, tp->tim_tcap_timoutid);
732 tp->tim_tcap_timoutid = 0;
733 }
734 }
735 putnext(q, mp);
736 break;
737 case M_DATA:
738 if (!bcanputnext(q, mp->b_band)) {
739 (void) putbq(q, mp);
740 return (1);
741 }
742 putnext(q, mp);
743 break;
744
745 case M_PROTO:
746 case M_PCPROTO:
747 blen = MBLKL(mp);
748 if (blen < sizeof (t_scalar_t)) {
749 /*
750 * Note: it's not actually possible to get
751 * here with db_type M_PCPROTO, because
752 * timodrput has already checked MBLKL, and
753 * thus the assertion below. If the length
754 * was too short, then the message would have
755 * already been putnext'd, and would thus
756 * never appear here. Just the same, the code
757 * below handles the impossible case since
758 * it's easy to do and saves future
759 * maintainers from unfortunate accidents.
760 */
761 ASSERT(mp->b_datap->db_type == M_PROTO);
762 if (mp->b_datap->db_type == M_PROTO &&
763 !bcanputnext(q, mp->b_band)) {
764 (void) putbq(q, mp);
765 return (1);
766 }
767 putnext(q, mp);
768 break;
769 }
770
771 pptr = (union T_primitives *)mp->b_rptr;
772 switch (pptr->type) {
773 default:
774
775 if (auditing)
776 audit_sock(T_UNITDATA_IND, q, mp, TIMOD_ID);
777 putnext(q, mp);
778 break;
779
780 case T_ERROR_ACK:
781 /* Restore db_type - recover() might have changed it */
782 mp->b_datap->db_type = M_PCPROTO;
783 if (blen < sizeof (struct T_error_ack)) {
784 putnext(q, mp);
785 break;
786 }
787
788 tilog("timodrproc: Got T_ERROR_ACK, flags = %x\n",
789 tp->tim_flags);
790
791 if ((tp->tim_flags & WAIT_CONNRESACK) &&
792 tp->tim_saved_prim == pptr->error_ack.ERROR_prim) {
793 tp->tim_flags &=
794 ~(WAIT_CONNRESACK | WAITIOCACK);
795 freemsg(tp->tim_iocsave);
796 tp->tim_iocsave = NULL;
797 tp->tim_saved_prim = -1;
798 putnext(q, mp);
799 } else if (tp->tim_flags & WAITIOCACK) {
800 tim_send_ioc_error_ack(q, tp, mp);
801 } else {
802 putnext(q, mp);
803 }
804 break;
805
806 case T_OK_ACK:
807 if (blen < sizeof (pptr->ok_ack)) {
808 mp->b_datap->db_type = M_PCPROTO;
809 putnext(q, mp);
810 break;
811 }
812
813 tilog("timodrproc: Got T_OK_ACK\n", 0);
814
815 if (pptr->ok_ack.CORRECT_prim == T_UNBIND_REQ)
816 tp->tim_mylen = 0;
817
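/*
 * T_OK_ACK for a pending T_CONN_RES: find the accepting stream by its
 * ACCEPTOR_id and copy the peer credential and the peer address from
 * the saved T_CONN_IND into it, so that a later TI_GETPEERNAME on that
 * stream can be answered by timod without asking the transport.
 */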
818 if ((tp->tim_flags & WAIT_CONNRESACK) &&
819 tp->tim_saved_prim == pptr->ok_ack.CORRECT_prim) {
820 struct T_conn_res *resp;
821 struct T_conn_ind *indp;
822 struct tim_tim *ntp;
823 caddr_t ptr;
824
825 rw_enter(&tim_list_rwlock, RW_READER);
826 resp = (struct T_conn_res *)
827 tp->tim_iocsave->b_rptr;
828 ntp = tim_findlink(resp->ACCEPTOR_id);
829 if (ntp == NULL)
830 goto cresackout;
831
832 mutex_enter(&ntp->tim_mutex);
833 if (ntp->tim_peercred != NULL)
834 crfree(ntp->tim_peercred);
835 ntp->tim_peercred =
836 msg_getcred(tp->tim_iocsave->b_cont,
837 &ntp->tim_cpid);
838 if (ntp->tim_peercred != NULL)
839 crhold(ntp->tim_peercred);
840
841 if (!(ntp->tim_flags & DO_PEERNAME)) {
842 mutex_exit(&ntp->tim_mutex);
843 goto cresackout;
844 }
845
846 indp = (struct T_conn_ind *)
847 tp->tim_iocsave->b_cont->b_rptr;
848 /* true as message is put on list */
849 ASSERT(indp->SRC_length >= 0);
850
851 if (indp->SRC_length > ntp->tim_peermaxlen) {
852 ptr = kmem_alloc(indp->SRC_length,
853 KM_NOSLEEP);
854 if (ptr == NULL) {
855 mutex_exit(&ntp->tim_mutex);
856 rw_exit(&tim_list_rwlock);
857 tilog("timodwproc: kmem_alloc "
858 "failed, attempting "
859 "recovery\n", 0);
860 tim_recover(q, mp,
861 indp->SRC_length);
862 return (1);
863 }
864 if (ntp->tim_peermaxlen > 0)
865 kmem_free(ntp->tim_peername,
866 ntp->tim_peermaxlen);
867 ntp->tim_peername = ptr;
868 ntp->tim_peermaxlen = indp->SRC_length;
869 }
870 ntp->tim_peerlen = indp->SRC_length;
871 ptr = (caddr_t)indp + indp->SRC_offset;
872 bcopy(ptr, ntp->tim_peername, ntp->tim_peerlen);
873
874 mutex_exit(&ntp->tim_mutex);
875
876 cresackout:
877 rw_exit(&tim_list_rwlock);
878 tp->tim_flags &=
879 ~(WAIT_CONNRESACK | WAITIOCACK);
880 freemsg(tp->tim_iocsave);
881 tp->tim_iocsave = NULL;
882 tp->tim_saved_prim = -1;
883 }
884
885 tim_send_reply(q, mp, tp, pptr->ok_ack.CORRECT_prim);
886 break;
887
888 case T_BIND_ACK: {
889 struct T_bind_ack *ackp =
890 (struct T_bind_ack *)mp->b_rptr;
891
892 /* Restore db_type - recover() might have changed it */
893 mp->b_datap->db_type = M_PCPROTO;
894 if (blen < sizeof (*ackp)) {
895 putnext(q, mp);
896 break;
897 }
898
899 /* save negotiated backlog */
900 tp->tim_backlog = ackp->CONIND_number;
901
902 if (((tp->tim_flags & WAITIOCACK) == 0) ||
903 ((tp->tim_saved_prim != O_T_BIND_REQ) &&
904 (tp->tim_saved_prim != T_BIND_REQ))) {
905 putnext(q, mp);
906 break;
907 }
908 ASSERT(tp->tim_iocsave != NULL);
909
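/*
 * timod is emulating TI_GETMYNAME for this provider, so cache the
 * address returned in the T_BIND_ACK, growing the tim_myname buffer
 * first if it is too small.
 */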
910 if (tp->tim_flags & DO_MYNAME) {
911 caddr_t p;
912
913 if (ackp->ADDR_length < 0 ||
914 mp->b_rptr + ackp->ADDR_offset +
915 ackp->ADDR_length > mp->b_wptr) {
916 putnext(q, mp);
917 break;
918 }
919 if (ackp->ADDR_length > tp->tim_mymaxlen) {
920 p = kmem_alloc(ackp->ADDR_length,
921 KM_NOSLEEP);
922 if (p == NULL) {
923 tilog("timodrproc: kmem_alloc "
924 "failed attempt recovery",
925 0);
926
927 tim_recover(q, mp,
928 ackp->ADDR_length);
929 return (1);
930 }
931 ASSERT(tp->tim_mymaxlen >= 0);
932 if (tp->tim_mymaxlen != 0) {
933 kmem_free(tp->tim_myname,
934 tp->tim_mymaxlen);
935 }
936 tp->tim_myname = p;
937 tp->tim_mymaxlen = ackp->ADDR_length;
938 }
939 tp->tim_mylen = ackp->ADDR_length;
940 bcopy(mp->b_rptr + ackp->ADDR_offset,
941 tp->tim_myname, tp->tim_mylen);
942 }
943 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
944 tp->tim_iocsave = NULL;
945 tp->tim_saved_prim = -1;
946 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
947 TI_CAP_RECVD | CAP_WANTS_INFO);
948 break;
949 }
950
951 case T_OPTMGMT_ACK:
952
953 tilog("timodrproc: Got T_OPTMGMT_ACK\n", 0);
954
955 /* Restore db_type - recover() might have changed it */
956 mp->b_datap->db_type = M_PCPROTO;
957
958 if (((tp->tim_flags & WAITIOCACK) == 0) ||
959 ((tp->tim_saved_prim != T_SVR4_OPTMGMT_REQ) &&
960 (tp->tim_saved_prim != T_OPTMGMT_REQ))) {
961 putnext(q, mp);
962 } else {
963 ASSERT(tp->tim_iocsave != NULL);
964 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
965 tp->tim_iocsave = NULL;
966 tp->tim_saved_prim = -1;
967 tp->tim_flags &= ~(WAITIOCACK |
968 WAIT_IOCINFOACK | TI_CAP_RECVD |
969 CAP_WANTS_INFO);
970 }
971 break;
972
973 case T_INFO_ACK: {
974 struct T_info_ack *tia = (struct T_info_ack *)pptr;
975
976 /* Restore db_type - recover() might have changed it */
977 mp->b_datap->db_type = M_PCPROTO;
978
979 if (blen < sizeof (*tia)) {
980 putnext(q, mp);
981 break;
982 }
983
984 tilog("timodrproc: Got T_INFO_ACK, flags = %x\n",
985 tp->tim_flags);
986
987 timodprocessinfo(q, tp, tia);
988
989 TILOG("timodrproc: flags = %x\n", tp->tim_flags);
990 if ((tp->tim_flags & WAITIOCACK) != 0) {
991 size_t expected_ack_size;
992 ssize_t deficit;
993 int ioc_cmd;
994 struct T_capability_ack *tcap;
995
996 /*
997 * The only case in which T_INFO_ACK may be received
998 * while we are waiting for an ioctl to complete is when
999 * that ioctl sent T_INFO_REQ down.
1000 */
1001 if (!(tp->tim_flags & WAIT_IOCINFOACK)) {
1002 putnext(q, mp);
1003 break;
1004 }
1005 ASSERT(tp->tim_iocsave != NULL);
1006
1007 iocbp = (struct iocblk *)tp->tim_iocsave->b_rptr;
1008 ioc_cmd = iocbp->ioc_cmd;
1009
1010 /*
1011 * Was it sent from TI_CAPABILITY emulation?
1012 */
1013 if (ioc_cmd == TI_CAPABILITY) {
1014 struct T_info_ack saved_info;
1015
1016 /*
1017 * Perform sanity checks. The only case when we
1018 * send T_INFO_REQ from TI_CAPABILITY is when
1019 * timod emulates T_CAPABILITY_REQ and CAP_bits1
1020 * has TC1_INFO set.
1021 */
1022 if ((tp->tim_flags &
1023 (TI_CAP_RECVD | CAP_WANTS_INFO)) !=
1024 (TI_CAP_RECVD | CAP_WANTS_INFO)) {
1025 putnext(q, mp);
1026 break;
1027 }
1028
1029 TILOG("timodrproc: emulating TI_CAPABILITY/"
1030 "info\n", 0);
1031
1032 /* Save info & reuse mp for T_CAPABILITY_ACK */
1033 saved_info = *tia;
1034
1035 mp = tpi_ack_alloc(mp,
1036 sizeof (struct T_capability_ack),
1037 M_PCPROTO, T_CAPABILITY_ACK);
1038
1039 if (mp == NULL) {
1040 tilog("timodrproc: realloc failed, "
1041 "no recovery attempted\n", 0);
1042 return (1);
1043 }
1044
1045 /*
1046 * Copy T_INFO information into T_CAPABILITY_ACK
1047 */
1048 tcap = (struct T_capability_ack *)mp->b_rptr;
1049 tcap->CAP_bits1 = TC1_INFO;
1050 tcap->INFO_ack = saved_info;
1051 tp->tim_flags &= ~(WAITIOCACK |
1052 WAIT_IOCINFOACK | TI_CAP_RECVD |
1053 CAP_WANTS_INFO);
1054 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
1055 tp->tim_iocsave = NULL;
1056 tp->tim_saved_prim = -1;
1057 break;
1058 }
1059
1060 /*
1061 * The code for TI_SYNC/TI_GETINFO is left here only for
1062 * backward compatibility with statically linked old
1063 * applications. New TLI/XTI code should use
1064 * TI_CAPABILITY for getting transport info and should
1065 * not use TI_GETINFO/TI_SYNC for this purpose.
1066 */
1067
1068 /*
1069 * make sure the message sent back is the size of
1070 * the "expected ack"
1071 * For TI_GETINFO, expected ack size is
1072 * sizeof (T_info_ack)
1073 * For TI_SYNC, expected ack size is
1074 * sizeof (struct ti_sync_ack);
1075 */
1076 if (ioc_cmd != TI_GETINFO && ioc_cmd != TI_SYNC) {
1077 putnext(q, mp);
1078 break;
1079 }
1080
1081 expected_ack_size =
1082 sizeof (struct T_info_ack); /* TI_GETINFO */
1083 if (iocbp->ioc_cmd == TI_SYNC) {
1084 expected_ack_size = 2 * sizeof (uint32_t) +
1085 sizeof (struct ti_sync_ack);
1086 }
1087 deficit = expected_ack_size - blen;
1088
1089 if (deficit != 0) {
1090 if (mp->b_datap->db_lim - mp->b_wptr <
1091 deficit) {
1092 mblk_t *tmp = allocb(expected_ack_size,
1093 BPRI_HI);
1094 if (tmp == NULL) {
1095 ASSERT(MBLKSIZE(mp) >=
1096 sizeof (struct T_error_ack));
1097
1098 tilog("timodrproc: allocb failed no "
1099 "recovery attempt\n", 0);
1100
1101 mp->b_rptr = mp->b_datap->db_base;
1102 pptr = (union T_primitives *)
1103 mp->b_rptr;
1104 pptr->error_ack.ERROR_prim = T_INFO_REQ;
1105 pptr->error_ack.TLI_error = TSYSERR;
1106 pptr->error_ack.UNIX_error = EAGAIN;
1107 pptr->error_ack.PRIM_type = T_ERROR_ACK;
1108 mp->b_datap->db_type = M_PCPROTO;
1109 tim_send_ioc_error_ack(q, tp, mp);
1110 break;
1111 } else {
1112 bcopy(mp->b_rptr, tmp->b_rptr, blen);
1113 tmp->b_wptr += blen;
1114 pptr = (union T_primitives *)
1115 tmp->b_rptr;
1116 freemsg(mp);
1117 mp = tmp;
1118 }
1119 }
1120 }
1121 /*
1122 * We now have "mp", which has enough space for an
1123 * appropriate ack and contains the struct T_info_ack
1124 * that the transport provider returned. Now fill in
1125 * the additional fields needed to fulfill the
1126 * TI_SYNC ioctl, as necessary.
1127 */
1128 if (iocbp->ioc_cmd == TI_SYNC) {
1129 /*
1130 * Assumes struct T_info_ack is first embedded
1131 * type in struct ti_sync_ack so it is
1132 * automatically there.
1133 */
1134 struct ti_sync_ack *tsap =
1135 (struct ti_sync_ack *)mp->b_rptr;
1136
1137 /*
1138 * tsap->tsa_qlen needs to be set only if
1139 * TSRF_QLEN_REQ flag is set, but for
1140 * compatibility with statically linked
1141 * applications it is set here regardless of the
1142 * flag since old XTI library expected it to be
1143 * set.
1144 */
1145 tsap->tsa_qlen = tp->tim_backlog;
1146 tsap->tsa_flags = 0x0; /* initialize clear */
1147 if (tp->tim_flags & PEEK_RDQ_EXPIND) {
1148 /*
1149 * Request to peek for EXPIND in
1150 * rcvbuf.
1151 */
1152 if (ti_expind_on_rdqueues(q)) {
1153 /*
1154 * Expedited data is
1155 * queued on the stream
1156 * read side
1157 */
1158 tsap->tsa_flags |=
1159 TSAF_EXP_QUEUED;
1160 }
1161 tp->tim_flags &=
1162 ~PEEK_RDQ_EXPIND;
1163 }
1164 mp->b_wptr += 2*sizeof (uint32_t);
1165 }
1166 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
1167 tp->tim_iocsave = NULL;
1168 tp->tim_saved_prim = -1;
1169 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
1170 TI_CAP_RECVD | CAP_WANTS_INFO);
1171 break;
1172 }
1173 }
1174
1175 putnext(q, mp);
1176 break;
1177
1178 case T_ADDR_ACK:
1179 tilog("timodrproc: Got T_ADDR_ACK\n", 0);
1180 tim_send_reply(q, mp, tp, T_ADDR_REQ);
1181 break;
1182
1183 case T_CONN_IND: {
1184 struct T_conn_ind *tcip =
1185 (struct T_conn_ind *)mp->b_rptr;
1186
1187 tilog("timodrproc: Got T_CONN_IND\n", 0);
1188
1189 if (blen >= sizeof (*tcip) &&
1190 MBLKIN(mp, tcip->SRC_offset, tcip->SRC_length)) {
1191 if (((nbp = dupmsg(mp)) != NULL) ||
1192 ((nbp = copymsg(mp)) != NULL)) {
1193 nbp->b_next = tp->tim_consave;
1194 tp->tim_consave = nbp;
1195 } else {
1196 tim_recover(q, mp,
1197 (t_scalar_t)sizeof (mblk_t));
1198 return (1);
1199 }
1200 }
1201 if (auditing)
1202 audit_sock(T_CONN_IND, q, mp, TIMOD_ID);
1203 putnext(q, mp);
1204 break;
1205 }
1206
1207 case T_CONN_CON:
1208 mutex_enter(&tp->tim_mutex);
1209 if (tp->tim_peercred != NULL)
1210 crfree(tp->tim_peercred);
1211 tp->tim_peercred = msg_getcred(mp, &tp->tim_cpid);
1212 if (tp->tim_peercred != NULL)
1213 crhold(tp->tim_peercred);
1214 mutex_exit(&tp->tim_mutex);
1215
1216 tilog("timodrproc: Got T_CONN_CON\n", 0);
1217
1218 tp->tim_flags &= ~CONNWAIT;
1219 putnext(q, mp);
1220 break;
1221
1222 case T_DISCON_IND: {
1223 struct T_discon_ind *disp;
1224 struct T_conn_ind *conp;
1225 mblk_t *pbp = NULL;
1226
1227 if (q->q_first != 0)
1228 tilog("timodrproc: T_DISCON_IND - flow control\n", 0);
1229
1230 if (blen < sizeof (*disp)) {
1231 putnext(q, mp);
1232 break;
1233 }
1234
1235 disp = (struct T_discon_ind *)mp->b_rptr;
1236
1237 tilog("timodrproc: Got T_DISCON_IND Reason: %d\n",
1238 disp->DISCON_reason);
1239
1240 tp->tim_flags &= ~(CONNWAIT|LOCORDREL|REMORDREL);
1241 tim_clear_peer(tp);
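/*
 * Drop any saved T_CONN_IND whose SEQ_number matches this disconnect;
 * that pending connection can no longer be accepted.
 */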
1242 for (nbp = tp->tim_consave; nbp; nbp = nbp->b_next) {
1243 conp = (struct T_conn_ind *)nbp->b_rptr;
1244 if (conp->SEQ_number == disp->SEQ_number)
1245 break;
1246 pbp = nbp;
1247 }
1248 if (nbp) {
1249 if (pbp)
1250 pbp->b_next = nbp->b_next;
1251 else
1252 tp->tim_consave = nbp->b_next;
1253 nbp->b_next = NULL;
1254 freemsg(nbp);
1255 }
1256 putnext(q, mp);
1257 break;
1258 }
1259
1260 case T_ORDREL_IND:
1261
1262 tilog("timodrproc: Got T_ORDREL_IND\n", 0);
1263
1264 if (tp->tim_flags & LOCORDREL) {
1265 tp->tim_flags &= ~(LOCORDREL|REMORDREL);
1266 tim_clear_peer(tp);
1267 } else {
1268 tp->tim_flags |= REMORDREL;
1269 }
1270 putnext(q, mp);
1271 break;
1272
1273 case T_EXDATA_IND:
1274 case T_DATA_IND:
1275 case T_UNITDATA_IND:
1276 if (pptr->type == T_EXDATA_IND)
1277 tilog("timodrproc: Got T_EXDATA_IND\n", 0);
1278
1279 if (!bcanputnext(q, mp->b_band)) {
1280 (void) putbq(q, mp);
1281 return (1);
1282 }
1283 putnext(q, mp);
1284 break;
1285
1286 case T_CAPABILITY_ACK: {
1287 struct T_capability_ack *tca;
1288
1289 if (blen < sizeof (*tca)) {
1290 putnext(q, mp);
1291 break;
1292 }
1293
1294 /* This transport supports T_CAPABILITY_REQ */
1295 tilog("timodrproc: Got T_CAPABILITY_ACK\n", 0);
1296
1297 PI_PROVLOCK(tp->tim_provinfo);
1298 if (tp->tim_provinfo->tpi_capability != PI_YES)
1299 tp->tim_provinfo->tpi_capability = PI_YES;
1300 PI_PROVUNLOCK(tp->tim_provinfo);
1301
1302 /* Reset possible pending timeout */
1303 if (tp->tim_tcap_timoutid != 0) {
1304 (void) quntimeout(q, tp->tim_tcap_timoutid);
1305 tp->tim_tcap_timoutid = 0;
1306 }
1307
1308 tca = (struct T_capability_ack *)mp->b_rptr;
1309
1310 if (tca->CAP_bits1 & TC1_INFO)
1311 timodprocessinfo(q, tp, &tca->INFO_ack);
1312
1313 tim_send_reply(q, mp, tp, T_CAPABILITY_REQ);
1314 }
1315 break;
1316 }
1317 break;
1318
1319 case M_FLUSH:
1320
1321 tilog("timodrproc: Got M_FLUSH\n", 0);
1322
1323 if (*mp->b_rptr & FLUSHR) {
1324 if (*mp->b_rptr & FLUSHBAND)
1325 flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
1326 else
1327 flushq(q, FLUSHDATA);
1328 }
1329 putnext(q, mp);
1330 break;
1331
1332 case M_IOCACK:
1333 iocbp = (struct iocblk *)mp->b_rptr;
1334
1335 tilog("timodrproc: Got M_IOCACK\n", 0);
1336
1337 if (iocbp->ioc_cmd == TI_GETMYNAME) {
1338
1339 /*
1340 * Transport provider supports this ioctl,
1341 * so I don't have to.
1342 */
1343 if ((tp->tim_flags & DO_MYNAME) != 0) {
1344 tp->tim_flags &= ~DO_MYNAME;
1345 PI_PROVLOCK(tp->tim_provinfo);
1346 tp->tim_provinfo->tpi_myname = PI_YES;
1347 PI_PROVUNLOCK(tp->tim_provinfo);
1348 }
1349
1350 ASSERT(tp->tim_mymaxlen >= 0);
1351 if (tp->tim_mymaxlen != 0) {
1352 kmem_free(tp->tim_myname, (size_t)tp->tim_mymaxlen);
1353 tp->tim_myname = NULL;
1354 tp->tim_mymaxlen = 0;
1355 }
1356 /* tim_iocsave may already be overwritten. */
1357 if (tp->tim_saved_prim == -1) {
1358 freemsg(tp->tim_iocsave);
1359 tp->tim_iocsave = NULL;
1360 }
1361 } else if (iocbp->ioc_cmd == TI_GETPEERNAME) {
1362 boolean_t clearit;
1363
1364 /*
1365 * Transport provider supports this ioctl,
1366 * so I don't have to.
1367 */
1368 if ((tp->tim_flags & DO_PEERNAME) != 0) {
1369 tp->tim_flags &= ~DO_PEERNAME;
1370 PI_PROVLOCK(tp->tim_provinfo);
1371 tp->tim_provinfo->tpi_peername = PI_YES;
1372 PI_PROVUNLOCK(tp->tim_provinfo);
1373 }
1374
1375 mutex_enter(&tp->tim_mutex);
1376 ASSERT(tp->tim_peermaxlen >= 0);
1377 clearit = tp->tim_peermaxlen != 0;
1378 if (clearit) {
1379 kmem_free(tp->tim_peername, tp->tim_peermaxlen);
1380 tp->tim_peername = NULL;
1381 tp->tim_peermaxlen = 0;
1382 tp->tim_peerlen = 0;
1383 }
1384 mutex_exit(&tp->tim_mutex);
1385 if (clearit) {
1386 mblk_t *bp;
1387
1388 bp = tp->tim_consave;
1389 while (bp != NULL) {
1390 nbp = bp->b_next;
1391 bp->b_next = NULL;
1392 freemsg(bp);
1393 bp = nbp;
1394 }
1395 tp->tim_consave = NULL;
1396 }
1397 /* tim_iocsave may already be overwritten. */
1398 if (tp->tim_saved_prim == -1) {
1399 freemsg(tp->tim_iocsave);
1400 tp->tim_iocsave = NULL;
1401 }
1402 }
1403 putnext(q, mp);
1404 break;
1405
1406 case M_IOCNAK:
1407
1408 tilog("timodrproc: Got M_IOCNAK\n", 0);
1409
1410 iocbp = (struct iocblk *)mp->b_rptr;
1411 if (((iocbp->ioc_cmd == TI_GETMYNAME) ||
1412 (iocbp->ioc_cmd == TI_GETPEERNAME)) &&
1413 ((iocbp->ioc_error == EINVAL) || (iocbp->ioc_error == 0))) {
1414 PI_PROVLOCK(tp->tim_provinfo);
1415 if (iocbp->ioc_cmd == TI_GETMYNAME) {
1416 if (tp->tim_provinfo->tpi_myname == PI_DONTKNOW)
1417 tp->tim_provinfo->tpi_myname = PI_NO;
1418 } else if (iocbp->ioc_cmd == TI_GETPEERNAME) {
1419 if (tp->tim_provinfo->tpi_peername == PI_DONTKNOW)
1420 tp->tim_provinfo->tpi_peername = PI_NO;
1421 }
1422 PI_PROVUNLOCK(tp->tim_provinfo);
1423 /* tim_iocsave may already be overwritten. */
1424 if ((tp->tim_iocsave != NULL) &&
1425 (tp->tim_saved_prim == -1)) {
1426 freemsg(mp);
1427 mp = tp->tim_iocsave;
1428 tp->tim_iocsave = NULL;
1429 tp->tim_flags |= NAMEPROC;
1430 if (ti_doname(WR(q), mp) != DONAME_CONT) {
1431 tp->tim_flags &= ~NAMEPROC;
1432 }
1433 break;
1434 }
1435 }
1436 putnext(q, mp);
1437 break;
1438 }
1439
1440 return (0);
1441 }
1442
1443 /*
1444 * timodwput - Module write put procedure. This is called from
1445 * the module, driver, or stream head upstream/downstream.
1446 * Handles M_FLUSH, M_DATA and some M_PROTO (T_DATA_REQ,
1447 * and T_UNITDATA_REQ) messages. All others are queued to
1448 * be handled by the service procedures.
1449 */
1450
1451 static int
1452 timodwput(queue_t *q, mblk_t *mp)
1453 {
1454 union T_primitives *pptr;
1455 struct tim_tim *tp;
1456 struct iocblk *iocbp;
1457
1458 /*
1459 * Enqueue normal-priority messages if our queue already
1460 * holds some messages for deferred processing but don't
1461 * enqueue those M_IOCTLs which will result in an
1462 * M_PCPROTO (ie, high priority) message being created.
1463 */
1464 if (q->q_first != 0 && mp->b_datap->db_type < QPCTL) {
1465 if (mp->b_datap->db_type == M_IOCTL) {
1466 iocbp = (struct iocblk *)mp->b_rptr;
1467 switch (iocbp->ioc_cmd) {
1468 default:
1469 (void) putq(q, mp);
1470 return (0);
1471
1472 case TI_GETINFO:
1473 case TI_SYNC:
1474 case TI_CAPABILITY:
1475 break;
1476 }
1477 } else {
1478 (void) putq(q, mp);
1479 return (0);
1480 }
1481 }
1482 /*
1483 * Inline processing of data (to avoid additional procedure call).
1484 * Rest is handled in timodwproc.
1485 */
1486
1487 switch (mp->b_datap->db_type) {
1488 case M_DATA:
1489 tp = (struct tim_tim *)q->q_ptr;
1490 ASSERT(tp);
1491 if (tp->tim_flags & CLTS) {
1492 mblk_t *tmp;
1493
1494 if ((tmp = tim_filladdr(q, mp, B_FALSE)) == NULL) {
1495 (void) putq(q, mp);
1496 break;
1497 } else {
1498 mp = tmp;
1499 }
1500 }
1501 if (bcanputnext(q, mp->b_band))
1502 putnext(q, mp);
1503 else
1504 (void) putq(q, mp);
1505 break;
1506 case M_PROTO:
1507 case M_PCPROTO:
1508 pptr = (union T_primitives *)mp->b_rptr;
1509 switch (pptr->type) {
1510 case T_UNITDATA_REQ:
1511 tp = (struct tim_tim *)q->q_ptr;
1512 ASSERT(tp);
1513 if (tp->tim_flags & CLTS) {
1514 mblk_t *tmp;
1515
1516 tmp = tim_filladdr(q, mp, B_FALSE);
1517 if (tmp == NULL) {
1518 (void) putq(q, mp);
1519 break;
1520 } else {
1521 mp = tmp;
1522 }
1523 }
1524 if (bcanputnext(q, mp->b_band))
1525 putnext(q, mp);
1526 else
1527 (void) putq(q, mp);
1528 break;
1529
1530 case T_DATA_REQ:
1531 case T_EXDATA_REQ:
1532 if (bcanputnext(q, mp->b_band))
1533 putnext(q, mp);
1534 else
1535 (void) putq(q, mp);
1536 break;
1537 default:
1538 (void) timodwproc(q, mp);
1539 break;
1540 }
1541 break;
1542 default:
1543 (void) timodwproc(q, mp);
1544 break;
1545 }
1546 return (0);
1547 }
1548 /*
1549 * timodwsrv - Module write queue service procedure.
1550 * This is called when messages are placed on an empty queue,
1551 * when high priority messages are placed on the queue, and
1552 * when flow control restrictions subside. This code used to
1553 * be included in a put procedure, but it was moved to a
1554 * service procedure because several points were added where
1555 * memory allocation could fail, and there is no reasonable
1556 * recovery mechanism from the put procedure.
1557 */
1558 static int
1559 timodwsrv(queue_t *q)
1560 {
1561 mblk_t *mp;
1562
1563 ASSERT(q != NULL);
1564 if (q->q_ptr == NULL)
1565 return (0);
1566
1567 while ((mp = getq(q)) != NULL) {
1568 if (timodwproc(q, mp)) {
1569 /*
1570 * timodwproc did a putbq - stop processing
1571 * messages.
1572 */
1573 return (0);
1574 }
1575 }
1576 return (0);
1577 }
1578
1579 /*
1580 * Common routine to process write side messages
1581 */
1582
1583 static int
1584 timodwproc(queue_t *q, mblk_t *mp)
1585 {
1586 union T_primitives *pptr;
1587 struct tim_tim *tp;
1588 uint32_t auditing = AU_AUDITING();
1589 mblk_t *tmp;
1590 struct iocblk *iocbp;
1591 int error;
1592
1593 tp = (struct tim_tim *)q->q_ptr;
1594
1595 switch (mp->b_datap->db_type) {
1596 default:
1597 putnext(q, mp);
1598 break;
1599
1600 case M_DATA:
1601 if (tp->tim_flags & CLTS) {
1602 if ((tmp = tim_filladdr(q, mp, B_TRUE)) == NULL) {
1603 return (1);
1604 } else {
1605 mp = tmp;
1606 }
1607 }
1608 if (!bcanputnext(q, mp->b_band)) {
1609 (void) putbq(q, mp);
1610 return (1);
1611 }
1612 putnext(q, mp);
1613 break;
1614
1615 case M_IOCTL:
1616
1617 iocbp = (struct iocblk *)mp->b_rptr;
1618 TILOG("timodwproc: Got M_IOCTL(%d)\n", iocbp->ioc_cmd);
1619
1620 ASSERT(MBLKL(mp) == sizeof (struct iocblk));
1621
1622 /*
1623 * TPI requires that we await the response to a previously sent message
1624 * before handling another, so put this one back on the head of the queue.
1625 * Since putbq() may see QWANTR unset when called from the
1626 * service procedure, the queue must be explicitly scheduled
1627 * for service, as no backenable will occur for this case.
1628 * tim_ioctl_retry() sets a timer to handle the qenable.
1629 */
1630 if (tp->tim_flags & WAITIOCACK) {
1631 TILOG("timodwproc: putbq M_IOCTL(%d)\n",
1632 iocbp->ioc_cmd);
1633 (void) putbq(q, mp);
1634 /* Called from timodwsrv() and messages on queue */
1635 if (!(q->q_flag & QWANTR))
1636 tim_ioctl_retry(q);
1637 return (1);
1638 }
1639
1640 switch (iocbp->ioc_cmd) {
1641 default:
1642 putnext(q, mp);
1643 break;
1644
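/*
 * Return the peer credential cached during connection setup (see the
 * T_CONN_CON and T_CONN_RES handling), but only for a connected COTS
 * endpoint with no orderly release in progress.
 */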
1645 case _I_GETPEERCRED:
1646 if ((tp->tim_flags & COTS) == 0) {
1647 miocnak(q, mp, 0, ENOTSUP);
1648 } else {
1649 mblk_t *cmp = mp->b_cont;
1650 k_peercred_t *kp = NULL;
1651
1652 mutex_enter(&tp->tim_mutex);
1653 if (cmp != NULL &&
1654 iocbp->ioc_flag == IOC_NATIVE &&
1655 (tp->tim_flags &
1656 (CONNWAIT|LOCORDREL|REMORDREL)) == 0 &&
1657 tp->tim_peercred != NULL &&
1658 DB_TYPE(cmp) == M_DATA &&
1659 MBLKL(cmp) == sizeof (k_peercred_t)) {
1660 kp = (k_peercred_t *)cmp->b_rptr;
1661 crhold(kp->pc_cr = tp->tim_peercred);
1662 kp->pc_cpid = tp->tim_cpid;
1663 }
1664 mutex_exit(&tp->tim_mutex);
1665 if (kp != NULL)
1666 miocack(q, mp, sizeof (*kp), 0);
1667 else
1668 miocnak(q, mp, 0, ENOTCONN);
1669 }
1670 break;
1671 case TI_BIND:
1672 case TI_UNBIND:
1673 case TI_OPTMGMT:
1674 case TI_GETADDRS:
1675 TILOG("timodwproc: TI_{BIND|UNBIND|OPTMGMT|GETADDRS}"
1676 "\n", 0);
1677
1678 /*
1679 * We know that tim_send_ioctl_tpi_msg() is only
1680 * going to examine the `type' field, so we only
1681 * check that we can access that much data.
1682 */
1683 error = miocpullup(mp, sizeof (t_scalar_t));
1684 if (error != 0) {
1685 miocnak(q, mp, 0, error);
1686 break;
1687 }
1688 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1689 break;
1690
1691 case TI_GETINFO:
1692 TILOG("timodwproc: TI_GETINFO\n", 0);
1693 error = miocpullup(mp, sizeof (struct T_info_req));
1694 if (error != 0) {
1695 miocnak(q, mp, 0, error);
1696 break;
1697 }
1698 tp->tim_flags |= WAIT_IOCINFOACK;
1699 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1700 break;
1701
1702 case TI_SYNC: {
1703 mblk_t *tsr_mp;
1704 struct ti_sync_req *tsr;
1705 uint32_t tsr_flags;
1706
1707 error = miocpullup(mp, sizeof (struct ti_sync_req));
1708 if (error != 0) {
1709 miocnak(q, mp, 0, error);
1710 break;
1711 }
1712
1713 tsr_mp = mp->b_cont;
1714 tsr = (struct ti_sync_req *)tsr_mp->b_rptr;
1715 TILOG("timodwproc: TI_SYNC(%x)\n", tsr->tsr_flags);
1716
1717 /*
1718 * Save out the value of tsr_flags, in case we
1719 * reallocb() tsr_mp (below).
1720 */
1721 tsr_flags = tsr->tsr_flags;
1722 if ((tsr_flags & TSRF_INFO_REQ) == 0) {
1723 mblk_t *ack_mp = reallocb(tsr_mp,
1724 sizeof (struct ti_sync_ack), 0);
1725
1726 /* Can reply immediately. */
1727 mp->b_cont = NULL;
1728 if (ack_mp == NULL) {
1729 tilog("timodwproc: allocb failed no "
1730 "recovery attempt\n", 0);
1731 freemsg(tsr_mp);
1732 miocnak(q, mp, 0, ENOMEM);
1733 } else {
1734 tim_answer_ti_sync(q, mp, tp,
1735 ack_mp, tsr_flags);
1736 }
1737 break;
1738 }
1739
1740 /*
1741 * This code is retained for compatibility with
1742 * old statically linked applications. New code
1743 * should use TI_CAPABILITY for all TPI
1744 * information and should not use TSRF_INFO_REQ
1745 * flag.
1746 *
1747 * Defer the necessary processing to the rput procedure,
1748 * as we need to get information from the transport
1749 * driver. Set flags that will tell the read
1750 * side the work needed on this request.
1751 */
1752
1753 if (tsr_flags & TSRF_IS_EXP_IN_RCVBUF)
1754 tp->tim_flags |= PEEK_RDQ_EXPIND;
1755
1756 /*
1757 * Convert the message to a T_INFO_REQ message; this relies
1758 * on sizeof (struct ti_sync_req) >= sizeof (struct
1759 * T_info_req).
1760 */
1761 ASSERT(MBLKL(tsr_mp) >= sizeof (struct T_info_req));
1762
1763 ((struct T_info_req *)tsr_mp->b_rptr)->PRIM_type =
1764 T_INFO_REQ;
1765 tsr_mp->b_wptr = tsr_mp->b_rptr +
1766 sizeof (struct T_info_req);
1767 tp->tim_flags |= WAIT_IOCINFOACK;
1768 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1769 }
1770 break;
1771
1772 case TI_CAPABILITY: {
1773 mblk_t *tcsr_mp;
1774 struct T_capability_req *tcr;
1775
1776 error = miocpullup(mp, sizeof (*tcr));
1777 if (error != 0) {
1778 miocnak(q, mp, 0, error);
1779 break;
1780 }
1781
1782 tcsr_mp = mp->b_cont;
1783 tcr = (struct T_capability_req *)tcsr_mp->b_rptr;
1784 TILOG("timodwproc: TI_CAPABILITY(CAP_bits1 = %x)\n",
1785 tcr->CAP_bits1);
1786
1787 if (tcr->PRIM_type != T_CAPABILITY_REQ) {
1788 TILOG("timodwproc: invalid msg type %d\n",
1789 tcr->PRIM_type);
1790 miocnak(q, mp, 0, EPROTO);
1791 break;
1792 }
1793
1794 switch (tp->tim_provinfo->tpi_capability) {
1795 case PI_YES:
1796 /* Just send T_CAPABILITY_REQ down */
1797 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1798 break;
1799
1800 case PI_DONTKNOW:
1801 /*
1802 * It is not yet known whether the transport provides
1803 * T_CAPABILITY_REQ. Send the message down
1804 * and wait for the reply.
1805 */
1806
1807 ASSERT(tp->tim_tcap_timoutid == 0);
1808 if ((tcr->CAP_bits1 & TC1_INFO) == 0) {
1809 tp->tim_flags |= TI_CAP_RECVD;
1810 } else {
1811 tp->tim_flags |= (TI_CAP_RECVD |
1812 CAP_WANTS_INFO);
1813 }
1814
1815 tp->tim_tcap_timoutid = qtimeout(q,
1816 tim_tcap_timer, q, tim_tcap_wait * hz);
1817 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1818 break;
1819
1820 case PI_NO:
1821 /*
1822 * Transport doesn't support T_CAPABILITY_REQ.
1823 * Either reply immediately or send T_INFO_REQ
1824 * if needed.
1825 */
1826 if ((tcr->CAP_bits1 & TC1_INFO) != 0) {
1827 tp->tim_flags |= (TI_CAP_RECVD |
1828 CAP_WANTS_INFO | WAIT_IOCINFOACK);
1829 TILOG("timodwproc: sending down "
1830 "T_INFO_REQ, flags = %x\n",
1831 tp->tim_flags);
1832
1833 /*
1834 * Generate T_INFO_REQ message and send
1835 * it down
1836 */
1837 ((struct T_info_req *)tcsr_mp->b_rptr)->
1838 PRIM_type = T_INFO_REQ;
1839 tcsr_mp->b_wptr = tcsr_mp->b_rptr +
1840 sizeof (struct T_info_req);
1841 tim_send_ioctl_tpi_msg(q, mp, tp,
1842 iocbp);
1843 break;
1844 }
1845
1846
1847 /*
1848 * Can reply immediately. Just send back
1849 * T_CAPABILITY_ACK with CAP_bits1 set to 0.
1850 */
1851 mp->b_cont = tcsr_mp = tpi_ack_alloc(mp->b_cont,
1852 sizeof (struct T_capability_ack), M_PCPROTO,
1853 T_CAPABILITY_ACK);
1854
1855 if (tcsr_mp == NULL) {
1856 tilog("timodwproc: allocb failed no "
1857 "recovery attempt\n", 0);
1858 miocnak(q, mp, 0, ENOMEM);
1859 break;
1860 }
1861
1862 tp->tim_flags &= ~(WAITIOCACK | TI_CAP_RECVD |
1863 WAIT_IOCINFOACK | CAP_WANTS_INFO);
1864 ((struct T_capability_ack *)
1865 tcsr_mp->b_rptr)->CAP_bits1 = 0;
1866 tim_ioctl_send_reply(q, mp, tcsr_mp);
1867
1868 /*
1869 * tim_iocsave may be non-NULL if timod is awaiting an ack
1870 * for TI_GETPEERNAME/TI_GETMYNAME.
1871 */
1872 if (tp->tim_iocsave != NULL) {
1873 freemsg(tp->tim_iocsave);
1874 tp->tim_iocsave = NULL;
1875 tp->tim_saved_prim = -1;
1876 }
1877 break;
1878
1879 default:
1880 cmn_err(CE_PANIC,
1881 "timodwproc: unknown tpi_capability value "
1882 "%d\n", tp->tim_provinfo->tpi_capability);
1883 break;
1884 }
1885 }
1886 break;
1887
1888 case TI_GETMYNAME:
1889
1890 tilog("timodwproc: Got TI_GETMYNAME\n", 0);
1891
1892 if (tp->tim_provinfo->tpi_myname == PI_YES) {
1893 putnext(q, mp);
1894 break;
1895 }
1896 goto getname;
1897
1898 case TI_GETPEERNAME:
1899
1900 tilog("timodwproc: Got TI_GETPEERNAME\n", 0);
1901
1902 if (tp->tim_provinfo->tpi_peername == PI_YES) {
1903 putnext(q, mp);
1904 break;
1905 }
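/*
 * It is not yet known whether the transport provider handles this
 * ioctl itself. Save the original M_IOCTL and probe the provider with
 * a copy; if the copy comes back as M_IOCNAK, timodrproc() falls back
 * to ti_doname() to emulate the ioctl in timod.
 */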
1906 getname:
1907 if ((tmp = copymsg(mp)) == NULL) {
1908 tim_recover(q, mp, msgsize(mp));
1909 return (1);
1910 }
1911 /*
1912 * tim_iocsave may be non-NULL when timod is awaiting
1913 * ack for another TI_GETPEERNAME/TI_GETMYNAME.
1914 */
1915 freemsg(tp->tim_iocsave);
1916 tp->tim_iocsave = mp;
1917 tp->tim_saved_prim = -1;
1918 putnext(q, tmp);
1919 break;
1920 }
1921 break;
1922
1923 case M_IOCDATA:
1924
1925 if (tp->tim_flags & NAMEPROC) {
1926 if (ti_doname(q, mp) != DONAME_CONT) {
1927 tp->tim_flags &= ~NAMEPROC;
1928 }
1929 } else
1930 putnext(q, mp);
1931 break;
1932
1933 case M_PROTO:
1934 case M_PCPROTO:
1935 if (MBLKL(mp) < sizeof (t_scalar_t)) {
1936 merror(q, mp, EPROTO);
1937 return (1);
1938 }
1939
1940 pptr = (union T_primitives *)mp->b_rptr;
1941 switch (pptr->type) {
1942 default:
1943 putnext(q, mp);
1944 break;
1945
1946 case T_EXDATA_REQ:
1947 case T_DATA_REQ:
1948 if (pptr->type == T_EXDATA_REQ)
1949 tilog("timodwproc: Got T_EXDATA_REQ\n", 0);
1950
1951 if (!bcanputnext(q, mp->b_band)) {
1952 (void) putbq(q, mp);
1953 return (1);
1954 }
1955 putnext(q, mp);
1956 break;
1957
1958 case T_UNITDATA_REQ:
1959 if (tp->tim_flags & CLTS) {
1960 tmp = tim_filladdr(q, mp, B_TRUE);
1961 if (tmp == NULL) {
1962 return (1);
1963 } else {
1964 mp = tmp;
1965 }
1966 }
1967 if (auditing)
1968 audit_sock(T_UNITDATA_REQ, q, mp, TIMOD_ID);
1969 if (!bcanputnext(q, mp->b_band)) {
1970 (void) putbq(q, mp);
1971 return (1);
1972 }
1973 putnext(q, mp);
1974 break;
1975
1976 case T_CONN_REQ: {
1977 struct T_conn_req *reqp = (struct T_conn_req *)
1978 mp->b_rptr;
1979 void *p;
1980
1981 tilog("timodwproc: Got T_CONN_REQ\n", 0);
1982
1983 if (MBLKL(mp) < sizeof (struct T_conn_req)) {
1984 merror(q, mp, EPROTO);
1985 return (1);
1986 }
1987
1988 if (tp->tim_flags & DO_PEERNAME) {
1989 if (!MBLKIN(mp, reqp->DEST_offset,
1990 reqp->DEST_length)) {
1991 merror(q, mp, EPROTO);
1992 return (1);
1993 }
1994 ASSERT(reqp->DEST_length >= 0);
1995 mutex_enter(&tp->tim_mutex);
1996 if (reqp->DEST_length > tp->tim_peermaxlen) {
1997 p = kmem_alloc(reqp->DEST_length,
1998 KM_NOSLEEP);
1999 if (p == NULL) {
2000 mutex_exit(&tp->tim_mutex);
2001 tilog("timodwproc: kmem_alloc "
2002 "failed, attempting "
2003 "recovery\n", 0);
2004 tim_recover(q, mp,
2005 reqp->DEST_length);
2006 return (1);
2007 }
2008 if (tp->tim_peermaxlen)
2009 kmem_free(tp->tim_peername,
2010 tp->tim_peermaxlen);
2011 tp->tim_peername = p;
2012 tp->tim_peermaxlen = reqp->DEST_length;
2013 }
2014 tp->tim_peerlen = reqp->DEST_length;
2015 p = mp->b_rptr + reqp->DEST_offset;
2016 bcopy(p, tp->tim_peername, tp->tim_peerlen);
2017 mutex_exit(&tp->tim_mutex);
2018 }
2019 if (tp->tim_flags & COTS)
2020 tp->tim_flags |= CONNWAIT;
2021 if (auditing)
2022 audit_sock(T_CONN_REQ, q, mp, TIMOD_ID);
2023 putnext(q, mp);
2024 break;
2025 }
2026
2027 case O_T_CONN_RES:
2028 case T_CONN_RES: {
2029 struct T_conn_res *resp;
2030 struct T_conn_ind *indp;
2031 mblk_t *pmp = NULL;
2032 mblk_t *nbp;
2033
2034 if (MBLKL(mp) < sizeof (struct T_conn_res) ||
2035 (tp->tim_flags & WAITIOCACK)) {
2036 merror(q, mp, EPROTO);
2037 return (1);
2038 }
2039
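/*
 * Find the saved T_CONN_IND whose SEQ_number matches this T_CONN_RES;
 * it carries the peer address that the read side will copy into the
 * accepting stream once the T_OK_ACK arrives (see WAIT_CONNRESACK).
 */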
2040 resp = (struct T_conn_res *)mp->b_rptr;
2041 for (tmp = tp->tim_consave; tmp != NULL;
2042 tmp = tmp->b_next) {
2043 indp = (struct T_conn_ind *)tmp->b_rptr;
2044 if (indp->SEQ_number == resp->SEQ_number)
2045 break;
2046 pmp = tmp;
2047 }
2048 if (tmp == NULL)
2049 goto cresout;
2050
2051 if ((nbp = dupb(mp)) == NULL &&
2052 (nbp = copyb(mp)) == NULL) {
2053 tim_recover(q, mp, msgsize(mp));
2054 return (1);
2055 }
2056
2057 if (pmp != NULL)
2058 pmp->b_next = tmp->b_next;
2059 else
2060 tp->tim_consave = tmp->b_next;
2061 tmp->b_next = NULL;
2062
2063 /*
2064 * Construct a list with:
2065 * nbp - copy of user's original request
2066 * tmp - the extracted T_conn_ind
2067 */
2068 nbp->b_cont = tmp;
2069 /*
2070 * tim_iocsave may be non-NULL when timod is awaiting
2071 * ack for TI_GETPEERNAME/TI_GETMYNAME.
2072 */
2073 freemsg(tp->tim_iocsave);
2074 tp->tim_iocsave = nbp;
2075 tp->tim_saved_prim = pptr->type;
2076 tp->tim_flags |= WAIT_CONNRESACK | WAITIOCACK;
2077
2078 cresout:
2079 putnext(q, mp);
2080 break;
2081 }
2082
2083 case T_DISCON_REQ: {
2084 struct T_discon_req *disp;
2085 struct T_conn_ind *conp;
2086 mblk_t *pmp = NULL;
2087
2088 if (MBLKL(mp) < sizeof (struct T_discon_req)) {
2089 merror(q, mp, EPROTO);
2090 return (1);
2091 }
2092
2093 disp = (struct T_discon_req *)mp->b_rptr;
2094 tp->tim_flags &= ~(CONNWAIT|LOCORDREL|REMORDREL);
2095 tim_clear_peer(tp);
2096
2097 /*
2098 * If we are already connected, there won't
2099 * be any messages on tim_consave.
2100 */
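			/*
			 * Otherwise this may disconnect a pending connect
			 * indication: drop the saved T_conn_ind whose
			 * SEQ_number matches the request.
			 */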
2101 for (tmp = tp->tim_consave; tmp; tmp = tmp->b_next) {
2102 conp = (struct T_conn_ind *)tmp->b_rptr;
2103 if (conp->SEQ_number == disp->SEQ_number)
2104 break;
2105 pmp = tmp;
2106 }
2107 if (tmp) {
2108 if (pmp)
2109 pmp->b_next = tmp->b_next;
2110 else
2111 tp->tim_consave = tmp->b_next;
2112 tmp->b_next = NULL;
2113 freemsg(tmp);
2114 }
2115 putnext(q, mp);
2116 break;
2117 }
2118
2119 case T_ORDREL_REQ:
2120 if (tp->tim_flags & REMORDREL) {
2121 tp->tim_flags &= ~(LOCORDREL|REMORDREL);
2122 tim_clear_peer(tp);
2123 } else {
2124 tp->tim_flags |= LOCORDREL;
2125 }
2126 putnext(q, mp);
2127 break;
2128
2129 case T_CAPABILITY_REQ:
2130 tilog("timodwproc: Got T_CAPABILITY_REQ\n", 0);
2131 /*
2132 * XXX: We may know at this point whether transport
2133 * provides T_CAPABILITY_REQ or not and we may utilise
2134 * this knowledge here.
2135 */
2136 putnext(q, mp);
2137 break;
2138 }
2139 break;
2140 case M_FLUSH:
2141
2142 tilog("timodwproc: Got M_FLUSH\n", 0);
2143
2144 if (*mp->b_rptr & FLUSHW) {
2145 if (*mp->b_rptr & FLUSHBAND)
2146 flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
2147 else
2148 flushq(q, FLUSHDATA);
2149 }
2150 putnext(q, mp);
2151 break;
2152 }
2153
2154 return (0);
2155 }
2156
2157 static void
2158 tilog(char *str, t_scalar_t arg)
2159 {
2160 if (dotilog) {
2161 if (dotilog & 2)
2162 cmn_err(CE_CONT, str, arg);
2163 if (dotilog & 4)
2164 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE | SL_ERROR,
2165 str, arg);
2166 else
2167 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE, str, arg);
2168 }
2169 }
2170
2171 static void
2172 tilogp(char *str, uintptr_t arg)
2173 {
2174 if (dotilog) {
2175 if (dotilog & 2)
2176 cmn_err(CE_CONT, str, arg);
2177 if (dotilog & 4)
2178 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE | SL_ERROR,
2179 str, arg);
2180 else
2181 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE, str, arg);
2182 }
2183 }
2184
2185
2186 /*
2187 * Process the TI_GETNAME ioctl. If no name exists, return len = 0
2188 * in strbuf structures. The state transitions are determined by what
2189 * is hung off cq_private (cp_private) in the copyreq (copyresp) structure.
2190 * The high-level steps in the ioctl processing are as follows:
2191 *
2192 * 1) we receive a transparent M_IOCTL with the arg in the second message
2193 * block of the message.
2194 * 2) we send up an M_COPYIN request for the strbuf structure pointed to
2195 * by arg. The block containing arg is hung off cq_private.
2196 * 3) we receive an M_IOCDATA response with cp->cp_private->b_cont == NULL.
2197 * This means that the strbuf structure is found in the message block
2198 * mp->b_cont.
2199 * 4) we send up an M_COPYOUT request with the strbuf message hung off
2200 * cq_private->b_cont. The address we are copying to is strbuf.buf.
2201 * we set strbuf.len to 0 to indicate that we should copy the strbuf
2202 * structure the next time. The message mp->b_cont contains the
2203 * address info.
2204 * 5) we receive an M_IOCDATA with cp_private->b_cont != NULL and
2205 * strbuf.len == 0. Restore strbuf.len to either tp->tim_mylen or
2206 * tp->tim_peerlen.
2207 * 6) we send up an M_COPYOUT request with a copy of the strbuf message
2208 * hung off mp->b_cont. In the strbuf structure in the message hung
2209 * off cq_private->b_cont, we set strbuf.len to 0 and strbuf.maxlen
2210 * to 0. This means that the next step is to ACK the ioctl.
2211 * 7) we receive an M_IOCDATA message with cp_private->b_cont != NULL and
2212 * strbuf.len == 0 and strbuf.maxlen == 0. Free up cp_private and
2213 * send an M_IOCACK upstream, and we are done.
2214 *
2215 */
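/*
 * For reference, the user-level side of this exchange is a single
 * transparent ioctl.  A minimal sketch (not part of this module), assuming
 * "fd" is a stream with timod pushed (e.g. a TLI endpoint):
 *
 *	struct strbuf name;
 *	char addr[256];
 *
 *	name.buf = addr;
 *	name.maxlen = sizeof (addr);
 *	name.len = 0;
 *	if (ioctl(fd, TI_GETMYNAME, &name) == 0 && name.len > 0) {
 *		first name.len bytes of addr hold the locally bound address
 *	}
 *
 * The stream head and this routine perform the M_COPYIN/M_COPYOUT steps
 * listed above on the application's behalf; TI_GETPEERNAME works the same
 * way for the peer's address.
 */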
2216 static int
2217 ti_doname(
2218 queue_t *q, /* queue message arrived at */
2219 mblk_t *mp) /* M_IOCTL or M_IOCDATA message only */
2220 {
2221 struct iocblk *iocp;
2222 struct copyreq *cqp;
2223 STRUCT_HANDLE(strbuf, sb);
2224 struct copyresp *csp;
2225 int ret;
2226 mblk_t *bp;
2227 struct tim_tim *tp = q->q_ptr;
2228 boolean_t getpeer;
2229
2230 switch (mp->b_datap->db_type) {
2231 case M_IOCTL:
2232 iocp = (struct iocblk *)mp->b_rptr;
2233 if ((iocp->ioc_cmd != TI_GETMYNAME) &&
2234 (iocp->ioc_cmd != TI_GETPEERNAME)) {
2235 tilog("ti_doname: bad M_IOCTL command\n", 0);
2236 miocnak(q, mp, 0, EINVAL);
2237 ret = DONAME_FAIL;
2238 break;
2239 }
2240 if ((iocp->ioc_count != TRANSPARENT)) {
2241 miocnak(q, mp, 0, EINVAL);
2242 ret = DONAME_FAIL;
2243 break;
2244 }
2245
2246 cqp = (struct copyreq *)mp->b_rptr;
2247 cqp->cq_private = mp->b_cont;
2248 cqp->cq_addr = (caddr_t)*(intptr_t *)mp->b_cont->b_rptr;
2249 mp->b_cont = NULL;
2250 cqp->cq_size = SIZEOF_STRUCT(strbuf, iocp->ioc_flag);
2251 cqp->cq_flag = 0;
2252 mp->b_datap->db_type = M_COPYIN;
2253 mp->b_wptr = mp->b_rptr + sizeof (struct copyreq);
2254 qreply(q, mp);
2255 ret = DONAME_CONT;
2256 break;
2257
2258 case M_IOCDATA:
2259 csp = (struct copyresp *)mp->b_rptr;
2260 iocp = (struct iocblk *)mp->b_rptr;
2261 cqp = (struct copyreq *)mp->b_rptr;
2262 if ((csp->cp_cmd != TI_GETMYNAME) &&
2263 (csp->cp_cmd != TI_GETPEERNAME)) {
2264 cmn_err(CE_WARN, "ti_doname: bad M_IOCDATA command\n");
2265 miocnak(q, mp, 0, EINVAL);
2266 ret = DONAME_FAIL;
2267 break;
2268 }
2269 if (csp->cp_rval) { /* error */
2270 freemsg(csp->cp_private);
2271 freemsg(mp);
2272 ret = DONAME_FAIL;
2273 break;
2274 }
2275 ASSERT(csp->cp_private != NULL);
2276 getpeer = csp->cp_cmd == TI_GETPEERNAME;
2277 if (getpeer)
2278 mutex_enter(&tp->tim_mutex);
2279 if (csp->cp_private->b_cont == NULL) { /* got strbuf */
2280 ASSERT(mp->b_cont);
2281 STRUCT_SET_HANDLE(sb, iocp->ioc_flag,
2282 (void *)mp->b_cont->b_rptr);
2283 if (getpeer) {
2284 if (tp->tim_peerlen == 0) {
2285 /* copy just strbuf */
2286 STRUCT_FSET(sb, len, 0);
2287 } else if (tp->tim_peerlen >
2288 STRUCT_FGET(sb, maxlen)) {
2289 mutex_exit(&tp->tim_mutex);
 freemsg(csp->cp_private);
2290 miocnak(q, mp, 0, ENAMETOOLONG);
2291 ret = DONAME_FAIL;
2292 break;
2293 } else {
2294 /* copy buffer */
2295 STRUCT_FSET(sb, len, tp->tim_peerlen);
2296 }
2297 } else {
2298 if (tp->tim_mylen == 0) {
2299 /* copy just strbuf */
2300 STRUCT_FSET(sb, len, 0);
2301 } else if (tp->tim_mylen >
2302 STRUCT_FGET(sb, maxlen)) {
2303 freemsg(csp->cp_private);
2304 miocnak(q, mp, 0, ENAMETOOLONG);
2305 ret = DONAME_FAIL;
2306 break;
2307 } else {
2308 /* copy buffer */
2309 STRUCT_FSET(sb, len, tp->tim_mylen);
2310 }
2311 }
2312 csp->cp_private->b_cont = mp->b_cont;
2313 mp->b_cont = NULL;
2314 }
2315 STRUCT_SET_HANDLE(sb, iocp->ioc_flag,
2316 (void *)csp->cp_private->b_cont->b_rptr);
2317 if (STRUCT_FGET(sb, len) == 0) {
2318 /*
2319 * restore strbuf.len
2320 */
2321 if (getpeer)
2322 STRUCT_FSET(sb, len, tp->tim_peerlen);
2323 else
2324 STRUCT_FSET(sb, len, tp->tim_mylen);
2325
2326 if (getpeer)
2327 mutex_exit(&tp->tim_mutex);
2328 if (STRUCT_FGET(sb, maxlen) == 0) {
2329
2330 /*
2331 * ack the ioctl
2332 */
2333 freemsg(csp->cp_private);
2334 tim_ioctl_send_reply(q, mp, NULL);
2335 ret = DONAME_DONE;
2336 break;
2337 }
2338
2339 if ((bp = allocb(STRUCT_SIZE(sb), BPRI_MED)) == NULL) {
2340
2341 tilog(
2342 "ti_doname: allocb failed no recovery attempt\n", 0);
2343
2344 freemsg(csp->cp_private);
2345 miocnak(q, mp, 0, EAGAIN);
2346 ret = DONAME_FAIL;
2347 break;
2348 }
2349 bp->b_wptr += STRUCT_SIZE(sb);
2350 bcopy(STRUCT_BUF(sb), bp->b_rptr, STRUCT_SIZE(sb));
2351 cqp->cq_addr =
2352 (caddr_t)*(intptr_t *)csp->cp_private->b_rptr;
2353 cqp->cq_size = STRUCT_SIZE(sb);
2354 cqp->cq_flag = 0;
2355 mp->b_datap->db_type = M_COPYOUT;
2356 mp->b_cont = bp;
2357 STRUCT_FSET(sb, len, 0);
2358 STRUCT_FSET(sb, maxlen, 0); /* ack next time around */
2359 qreply(q, mp);
2360 ret = DONAME_CONT;
2361 break;
2362 }
2363
2364 /*
2365 * copy the address to the user
2366 */
2367 if ((bp = allocb((size_t)STRUCT_FGET(sb, len), BPRI_MED))
2368 == NULL) {
2369 if (getpeer)
2370 mutex_exit(&tp->tim_mutex);
2371
2372 tilog("ti_doname: allocb failed no recovery attempt\n",
2373 0);
2374
2375 freemsg(csp->cp_private);
2376 miocnak(q, mp, 0, EAGAIN);
2377 ret = DONAME_FAIL;
2378 break;
2379 }
2380 bp->b_wptr += STRUCT_FGET(sb, len);
2381 if (getpeer) {
2382 bcopy(tp->tim_peername, bp->b_rptr,
2383 STRUCT_FGET(sb, len));
2384 mutex_exit(&tp->tim_mutex);
2385 } else {
2386 bcopy(tp->tim_myname, bp->b_rptr, STRUCT_FGET(sb, len));
2387 }
2388 cqp->cq_addr = (caddr_t)STRUCT_FGETP(sb, buf);
2389 cqp->cq_size = STRUCT_FGET(sb, len);
2390 cqp->cq_flag = 0;
2391 mp->b_datap->db_type = M_COPYOUT;
2392 mp->b_cont = bp;
2393 STRUCT_FSET(sb, len, 0); /* copy the strbuf next time around */
2394 qreply(q, mp);
2395 ret = DONAME_CONT;
2396 break;
2397
2398 default:
2399 tilog("ti_doname: freeing bad message type = %d\n",
2400 mp->b_datap->db_type);
2401 freemsg(mp);
2402 ret = DONAME_FAIL;
2403 break;
2404 }
2405 return (ret);
2406 }
2407
2408
2409 /*
2410 * Fill in the address of a connectionless data packet if a connect
2411 * had been done on this endpoint.
2412 */
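/*
 * For an M_DATA message a T_unitdata_req header block is prepended; the
 * result is laid out roughly as:
 *
 *	bp (M_PROTO)				mp (M_DATA)
 *	+-------------------------------+	+-----------+
 *	| struct T_unitdata_req         | ---->	| user data |
 *	|   DEST_offset = sizeof (req)  |	+-----------+
 *	|   DEST_length = tim_peerlen   |
 *	|   OPT_length = OPT_offset = 0 |
 *	| saved peer address bytes      |
 *	+-------------------------------+
 *
 * For an M_PROTO T_UNITDATA_REQ with no destination, the request is rebuilt
 * with the saved peer address filled in and any options copied after it.
 */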
2413 static mblk_t *
2414 tim_filladdr(queue_t *q, mblk_t *mp, boolean_t dorecover)
2415 {
2416 mblk_t *bp;
2417 struct tim_tim *tp;
2418 struct T_unitdata_req *up;
2419 struct T_unitdata_req *nup;
2420 size_t plen;
2421
2422 tp = (struct tim_tim *)q->q_ptr;
2423 if (mp->b_datap->db_type == M_DATA) {
2424 mutex_enter(&tp->tim_mutex);
2425 bp = allocb(sizeof (struct T_unitdata_req) + tp->tim_peerlen,
2426 BPRI_MED);
2427 if (bp != NULL) {
2428 bp->b_datap->db_type = M_PROTO;
2429 up = (struct T_unitdata_req *)bp->b_rptr;
2430 up->PRIM_type = T_UNITDATA_REQ;
2431 up->DEST_length = tp->tim_peerlen;
2432 bp->b_wptr += sizeof (struct T_unitdata_req);
2433 up->DEST_offset = sizeof (struct T_unitdata_req);
2434 up->OPT_length = 0;
2435 up->OPT_offset = 0;
2436 if (tp->tim_peerlen > 0) {
2437 bcopy(tp->tim_peername, bp->b_wptr,
2438 tp->tim_peerlen);
2439 bp->b_wptr += tp->tim_peerlen;
2440 }
2441 bp->b_cont = mp;
2442 }
2443 } else {
2444 ASSERT(mp->b_datap->db_type == M_PROTO);
2445 up = (struct T_unitdata_req *)mp->b_rptr;
2446 ASSERT(up->PRIM_type == T_UNITDATA_REQ);
2447 if (up->DEST_length != 0)
2448 return (mp);
2449 mutex_enter(&tp->tim_mutex);
2450 bp = allocb(sizeof (struct T_unitdata_req) + up->OPT_length +
2451 tp->tim_peerlen, BPRI_MED);
2452 if (bp != NULL) {
2453 bp->b_datap->db_type = M_PROTO;
2454 nup = (struct T_unitdata_req *)bp->b_rptr;
2455 nup->PRIM_type = T_UNITDATA_REQ;
2456 nup->DEST_length = plen = tp->tim_peerlen;
2457 bp->b_wptr += sizeof (struct T_unitdata_req);
2458 nup->DEST_offset = sizeof (struct T_unitdata_req);
2459 if (plen > 0) {
2460 bcopy(tp->tim_peername, bp->b_wptr, plen);
2461 bp->b_wptr += plen;
2462 }
2463 mutex_exit(&tp->tim_mutex);
2464 if (up->OPT_length == 0) {
2465 nup->OPT_length = 0;
2466 nup->OPT_offset = 0;
2467 } else {
2468 nup->OPT_length = up->OPT_length;
2469 nup->OPT_offset =
2470 sizeof (struct T_unitdata_req) + plen;
2471 bcopy((mp->b_rptr + up->OPT_offset), bp->b_wptr,
2472 up->OPT_length);
2473 bp->b_wptr += up->OPT_length;
2474 }
2475 bp->b_cont = mp->b_cont;
2476 mp->b_cont = NULL;
2477 freeb(mp);
2478 return (bp);
2479 }
2480 }
2481 ASSERT(MUTEX_HELD(&tp->tim_mutex));
2482 if (bp == NULL && dorecover) {
2483 tim_recover(q, mp,
2484 sizeof (struct T_unitdata_req) + tp->tim_peerlen);
2485 }
2486 mutex_exit(&tp->tim_mutex);
2487 return (bp);
2488 }
2489
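/*
 * tim_addlink() and tim_dellink() maintain a hash list of timod instances
 * keyed by acceptor id (tim_acceptor) and protected by tim_list_rwlock;
 * tim_findlink() looks an instance up by id and expects the caller to hold
 * the lock.
 */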
2490 static void
2491 tim_addlink(struct tim_tim *tp)
2492 {
2493 struct tim_tim **tpp;
2494 struct tim_tim *next;
2495
2496 tpp = &tim_hash[TIM_HASH(tp->tim_acceptor)];
2497 rw_enter(&tim_list_rwlock, RW_WRITER);
2498
2499 if ((next = *tpp) != NULL)
2500 next->tim_ptpn = &tp->tim_next;
2501 tp->tim_next = next;
2502 tp->tim_ptpn = tpp;
2503 *tpp = tp;
2504
2505 tim_cnt++;
2506
2507 rw_exit(&tim_list_rwlock);
2508 }
2509
2510 static void
2511 tim_dellink(struct tim_tim *tp)
2512 {
2513 struct tim_tim *next;
2514
2515 rw_enter(&tim_list_rwlock, RW_WRITER);
2516
2517 if ((next = tp->tim_next) != NULL)
2518 next->tim_ptpn = tp->tim_ptpn;
2519 *(tp->tim_ptpn) = next;
2520
2521 tim_cnt--;
2522
2523 rw_exit(&tim_list_rwlock);
2524 }
2525
2526 static struct tim_tim *
2527 tim_findlink(t_uscalar_t id)
2528 {
2529 struct tim_tim *tp;
2530
2531 ASSERT(rw_lock_held(&tim_list_rwlock));
2532
2533 for (tp = tim_hash[TIM_HASH(id)]; tp != NULL; tp = tp->tim_next) {
2534 if (tp->tim_acceptor == id) {
2535 break;
2536 }
2537 }
2538 return (tp);
2539 }
2540
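/*
 * Allocation failure recovery: downgrade M_PCPROTO to M_PROTO so that
 * requeueing the message does not re-enable the queue, disable the queue
 * and put the message back, then schedule a bufcall for the needed size
 * (or, if that fails, a timeout) so processing can resume once memory is
 * available.  At most one bufcall or timeout is kept outstanding per side.
 */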
2541 static void
2542 tim_recover(queue_t *q, mblk_t *mp, t_scalar_t size)
2543 {
2544 struct tim_tim *tp;
2545 bufcall_id_t bid;
2546 timeout_id_t tid;
2547
2548 tp = (struct tim_tim *)q->q_ptr;
2549
2550 /*
2551 * Avoid re-enabling the queue.
2552 */
2553 if (mp->b_datap->db_type == M_PCPROTO)
2554 mp->b_datap->db_type = M_PROTO;
2555 noenable(q);
2556 (void) putbq(q, mp);
2557
2558 /*
2559 * Make sure there is at most one outstanding request per queue.
2560 */
2561 if (q->q_flag & QREADR) {
2562 if (tp->tim_rtimoutid || tp->tim_rbufcid)
2563 return;
2564 } else {
2565 if (tp->tim_wtimoutid || tp->tim_wbufcid)
2566 return;
2567 }
2568 if (!(bid = qbufcall(RD(q), (size_t)size, BPRI_MED, tim_buffer, q))) {
2569 tid = qtimeout(RD(q), tim_timer, q, TIMWAIT);
2570 if (q->q_flag & QREADR)
2571 tp->tim_rtimoutid = tid;
2572 else
2573 tp->tim_wtimoutid = tid;
2574 } else {
2575 if (q->q_flag & QREADR)
2576 tp->tim_rbufcid = bid;
2577 else
2578 tp->tim_wbufcid = bid;
2579 }
2580 }
2581
2582 /*
2583 * Timod is waiting on a downstream ioctl reply; come back soon to
2584 * reschedule the write-side service routine, which will check whether
2585 * the ioctl is done so another can proceed.
2586 */
2587 static void
2588 tim_ioctl_retry(queue_t *q)
2589 {
2590 struct tim_tim *tp;
2591
2592 tp = (struct tim_tim *)q->q_ptr;
2593
2594 /*
2595 * Make sure there is at most one outstanding request per write queue.
2596 */
2597 if (tp->tim_wtimoutid || tp->tim_wbufcid)
2598 return;
2599
2600 tp->tim_wtimoutid = qtimeout(RD(q), tim_timer, q, TIMIOCWAIT);
2601 }
2602
2603 /*
2604 * Inspect the data on the read queues, starting from the read queue
2605 * passed as a parameter (the timod read queue) and traversing until
2606 * q_next is NULL (the stream head).  Look for a TPI T_EXDATA_IND
2607 * message; return 1 if found, 0 if not.
2608 */
2609 static int
2610 ti_expind_on_rdqueues(queue_t *rq)
2611 {
2612 mblk_t *bp;
2613 queue_t *q;
2614
2615 q = rq;
2616 /*
2617 * We are going to walk q_next, so protect stream from plumbing
2618 * changes.
2619 */
2620 claimstr(q);
2621 do {
2622 /*
2623 * Hold QLOCK while referencing data on queues
2624 */
2625 mutex_enter(QLOCK(rq));
2626 bp = rq->q_first;
2627 while (bp != NULL) {
2628 /*
2629 * Walk the messages on the queue looking
2630 * for a possible T_EXDATA_IND
2631 */
2632 if ((bp->b_datap->db_type == M_PROTO) &&
2633 ((bp->b_wptr - bp->b_rptr) >=
2634 sizeof (struct T_exdata_ind)) &&
2635 (((struct T_exdata_ind *)bp->b_rptr)->PRIM_type
2636 == T_EXDATA_IND)) {
2637 /* bp is T_EXDATA_IND */
2638 mutex_exit(QLOCK(rq));
2639 releasestr(q); /* decrement sd_refcnt */
2640 return (1); /* expdata is on a read queue */
2641 }
2642 bp = bp->b_next; /* next message */
2643 }
2644 mutex_exit(QLOCK(rq));
2645 rq = rq->q_next; /* next upstream queue */
2646 } while (rq != NULL);
2647 releasestr(q);
2648 return (0); /* no expdata on read queues */
2649 }
2650
2651 static void
2652 tim_tcap_timer(void *q_ptr)
2653 {
2654 queue_t *q = (queue_t *)q_ptr;
2655 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
2656
2657 ASSERT(tp != NULL && tp->tim_tcap_timoutid != 0);
2658 ASSERT((tp->tim_flags & TI_CAP_RECVD) != 0);
2659
2660 tp->tim_tcap_timoutid = 0;
2661 TILOG("tim_tcap_timer: fired\n", 0);
2662 tim_tcap_genreply(q, tp);
2663 }
2664
2665 /*
2666 * tim_tcap_genreply() is called either from the timeout routine or when a
2667 * T_ERROR_ACK is received.  In both cases it means that the underlying
2668 * transport doesn't provide T_CAPABILITY_REQ.
2669 */
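/*
 * After recording that fact in tim_provinfo (tpi_capability = PI_NO),
 * either emulate TC1_INFO by sending a T_INFO_REQ down and completing the
 * ioctl when the ack arrives (CAP_WANTS_INFO set), or ack the ioctl
 * immediately with an empty T_CAPABILITY_ACK.
 */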
2670 static void
2671 tim_tcap_genreply(queue_t *q, struct tim_tim *tp)
2672 {
2673 mblk_t *mp = tp->tim_iocsave;
2674 struct iocblk *iocbp;
2675
2676 TILOG("timodrproc: tim_tcap_genreply\n", 0);
2677
2678 ASSERT(tp == (struct tim_tim *)q->q_ptr);
2679 ASSERT(mp != NULL);
2680
2681 iocbp = (struct iocblk *)mp->b_rptr;
2682 ASSERT(iocbp != NULL);
2683 ASSERT(MBLKL(mp) == sizeof (struct iocblk));
2684 ASSERT(iocbp->ioc_cmd == TI_CAPABILITY);
2685 ASSERT(mp->b_cont == NULL);
2686
2687 /* Save this information permanently in the module */
2688 PI_PROVLOCK(tp->tim_provinfo);
2689 if (tp->tim_provinfo->tpi_capability == PI_DONTKNOW)
2690 tp->tim_provinfo->tpi_capability = PI_NO;
2691 PI_PROVUNLOCK(tp->tim_provinfo);
2692
2693 if (tp->tim_tcap_timoutid != 0) {
2694 (void) quntimeout(q, tp->tim_tcap_timoutid);
2695 tp->tim_tcap_timoutid = 0;
2696 }
2697
2698 if ((tp->tim_flags & CAP_WANTS_INFO) != 0) {
2699 /* Send T_INFO_REQ down */
2700 mblk_t *tirmp = tpi_ack_alloc(NULL,
2701 sizeof (struct T_info_req), M_PCPROTO, T_INFO_REQ);
2702
2703 if (tirmp != NULL) {
2704 /* Emulate TC1_INFO */
2705 TILOG("emulate_tcap_ioc_req: sending T_INFO_REQ\n", 0);
2706 tp->tim_flags |= WAIT_IOCINFOACK;
2707 putnext(WR(q), tirmp);
2708 } else {
2709 tilog("emulate_tcap_req: allocb fail, "
2710 "no recovery attmpt\n", 0);
2711 tp->tim_iocsave = NULL;
2712 tp->tim_saved_prim = -1;
2713 tp->tim_flags &= ~(TI_CAP_RECVD | WAITIOCACK |
2714 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2715 miocnak(q, mp, 0, ENOMEM);
2716 }
2717 } else {
2718 /* Reply immediately */
2719 mblk_t *ackmp = tpi_ack_alloc(NULL,
2720 sizeof (struct T_capability_ack), M_PCPROTO,
2721 T_CAPABILITY_ACK);
2722
2723 mp->b_cont = ackmp;
2724
2725 if (ackmp != NULL) {
2726 ((struct T_capability_ack *)
2727 ackmp->b_rptr)->CAP_bits1 = 0;
2728 tim_ioctl_send_reply(q, mp, ackmp);
2729 tp->tim_iocsave = NULL;
2730 tp->tim_saved_prim = -1;
2731 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2732 TI_CAP_RECVD | CAP_WANTS_INFO);
2733 } else {
2734 tilog("timodwproc:allocb failed no "
2735 "recovery attempt\n", 0);
2736 tp->tim_iocsave = NULL;
2737 tp->tim_saved_prim = -1;
2738 tp->tim_flags &= ~(TI_CAP_RECVD | WAITIOCACK |
2739 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2740 miocnak(q, mp, 0, ENOMEM);
2741 }
2742 }
2743 }
2744
2745
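/*
 * Turn the saved M_IOCTL into an M_IOCACK, attach mp (if any) as the reply
 * data, set ioc_count to match, and send the ack upstream on the read side.
 */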
2746 static void
2747 tim_ioctl_send_reply(queue_t *q, mblk_t *ioc_mp, mblk_t *mp)
2748 {
2749 struct iocblk *iocbp;
2750
2751 ASSERT(q != NULL && ioc_mp != NULL);
2752
2753 ioc_mp->b_datap->db_type = M_IOCACK;
2754 if (mp != NULL)
2755 mp->b_datap->db_type = M_DATA;
2756
2757 if (ioc_mp->b_cont != mp) {
2758 /* It is safe to call freemsg for NULL pointers */
2759 freemsg(ioc_mp->b_cont);
2760 ioc_mp->b_cont = mp;
2761 }
2762 iocbp = (struct iocblk *)ioc_mp->b_rptr;
2763 iocbp->ioc_error = 0;
2764 iocbp->ioc_rval = 0;
2765 /*
2766 * All ioctl's may return more data than was specified by
2767 * count arg. For TI_CAPABILITY count is treated as maximum data size.
2768 */
2769 if (mp == NULL)
2770 iocbp->ioc_count = 0;
2771 else if (iocbp->ioc_cmd != TI_CAPABILITY)
2772 iocbp->ioc_count = msgsize(mp);
2773 else {
2774 iocbp->ioc_count = MIN(MBLKL(mp), iocbp->ioc_count);
2775 /* Truncate message if too large */
2776 mp->b_wptr = mp->b_rptr + iocbp->ioc_count;
2777 }
2778
2779 TILOG("iosendreply: ioc_cmd = %d, ", iocbp->ioc_cmd);
2780 putnext(RD(q), ioc_mp);
2781 }
2782
2783 /*
2784 * Send M_IOCACK for errors.
2785 */
2786 static void
2787 tim_send_ioc_error_ack(queue_t *q, struct tim_tim *tp, mblk_t *mp)
2788 {
2789 struct T_error_ack *tea = (struct T_error_ack *)mp->b_rptr;
2790 t_scalar_t error_prim;
2791
2792 mp->b_wptr = mp->b_rptr + sizeof (struct T_error_ack);
2793 ASSERT(mp->b_wptr <= mp->b_datap->db_lim);
2794 error_prim = tea->ERROR_prim;
2795
2796 ASSERT(tp->tim_iocsave != NULL);
2797 ASSERT(tp->tim_iocsave->b_cont != mp);
2798
2799 /* Always send this to the read side of the queue */
2800 q = RD(q);
2801
2802 TILOG("tim_send_ioc_error_ack: prim = %d\n", tp->tim_saved_prim);
2803
2804 if (tp->tim_saved_prim != error_prim) {
2805 putnext(q, mp);
2806 } else if (error_prim == T_CAPABILITY_REQ) {
2807 TILOG("timodrproc: T_ERROR_ACK/T_CAPABILITY_REQ\n", 0);
2808 ASSERT(tp->tim_iocsave->b_cont == NULL);
2809
2810 tim_tcap_genreply(q, tp);
2811 freemsg(mp);
2812 } else {
2813 struct iocblk *iocbp = (struct iocblk *)tp->tim_iocsave->b_rptr;
2814
2815 TILOG("tim_send_ioc_error_ack: T_ERROR_ACK: prim %d\n",
2816 error_prim);
2817 ASSERT(tp->tim_iocsave->b_cont == NULL);
2818
2819 switch (error_prim) {
2820 default:
2821 TILOG("timodrproc: Unknown T_ERROR_ACK: tlierror %d\n",
2822 tea->TLI_error);
2823
2824 putnext(q, mp);
2825 break;
2826
2827 case T_INFO_REQ:
2828 case T_SVR4_OPTMGMT_REQ:
2829 case T_OPTMGMT_REQ:
2830 case O_T_BIND_REQ:
2831 case T_BIND_REQ:
2832 case T_UNBIND_REQ:
2833 case T_ADDR_REQ:
2834 case T_CAPABILITY_REQ:
2835
2836 TILOG("ioc_err_ack: T_ERROR_ACK: tlierror %x\n",
2837 tea->TLI_error);
2838
2839 /* get saved ioctl msg and set values */
2840 iocbp->ioc_count = 0;
2841 iocbp->ioc_error = 0;
2842 iocbp->ioc_rval = tea->TLI_error;
2843 if (iocbp->ioc_rval == TSYSERR)
2844 iocbp->ioc_rval |= tea->UNIX_error << 8;
2845 tp->tim_iocsave->b_datap->db_type = M_IOCACK;
2846 freemsg(mp);
2847 putnext(q, tp->tim_iocsave);
2848 tp->tim_iocsave = NULL;
2849 tp->tim_saved_prim = -1;
2850 tp->tim_flags &= ~(WAITIOCACK | TI_CAP_RECVD |
2851 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2852 break;
2853 }
2854 }
2855 }
2856
2857 /*
2858 * Send reply to a usual message or ioctl message upstream.
2859 * Should be called from the read side only.
2860 */
2861 static void
2862 tim_send_reply(queue_t *q, mblk_t *mp, struct tim_tim *tp, t_scalar_t prim)
2863 {
2864 ASSERT(mp != NULL && q != NULL && tp != NULL);
2865 ASSERT(q == RD(q));
2866
2867 /* Restore db_type - recover() might have changed it */
2868 mp->b_datap->db_type = M_PCPROTO;
2869
2870 if (((tp->tim_flags & WAITIOCACK) == 0) || (tp->tim_saved_prim != prim))
2871 putnext(q, mp);
2872 else {
2873 ASSERT(tp->tim_iocsave != NULL);
2874 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
2875 tp->tim_iocsave = NULL;
2876 tp->tim_saved_prim = -1;
2877 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2878 TI_CAP_RECVD | CAP_WANTS_INFO);
2879 }
2880 }
2881
2882 /*
2883 * Reply to a TI_SYNC request without sending anything downstream.
2884 */
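/*
 * The ack reports tsa_qlen (tp->tim_backlog) when TSRF_QLEN_REQ is set,
 * and TSAF_EXP_QUEUED when TSRF_IS_EXP_IN_RCVBUF is set and expedited data
 * is waiting on the stream's read queues.
 */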
2885 static void
2886 tim_answer_ti_sync(queue_t *q, mblk_t *mp, struct tim_tim *tp,
2887 mblk_t *ackmp, uint32_t tsr_flags)
2888 {
2889 struct ti_sync_ack *tsap;
2890
2891 ASSERT(q != NULL && q == WR(q) && ackmp != NULL);
2892
2893 tsap = (struct ti_sync_ack *)ackmp->b_rptr;
2894 bzero(tsap, sizeof (struct ti_sync_ack));
2895 ackmp->b_wptr = ackmp->b_rptr + sizeof (struct ti_sync_ack);
2896
2897 if (tsr_flags == 0 ||
2898 (tsr_flags & ~(TSRF_QLEN_REQ | TSRF_IS_EXP_IN_RCVBUF)) != 0) {
2899 /*
2900 * unsupported/bad flag setting
2901 * or no flag set.
2902 */
2903 TILOG("timodwproc: unsupported/bad flag setting %x\n",
2904 tsr_flags);
2905 freemsg(ackmp);
2906 miocnak(q, mp, 0, EINVAL);
2907 return;
2908 }
2909
2910 if ((tsr_flags & TSRF_QLEN_REQ) != 0)
2911 tsap->tsa_qlen = tp->tim_backlog;
2912
2913 if ((tsr_flags & TSRF_IS_EXP_IN_RCVBUF) != 0 &&
2914 ti_expind_on_rdqueues(RD(q))) {
2915 /*
2916 * Expedited data is queued on
2917 * the stream read side
2918 */
2919 tsap->tsa_flags |= TSAF_EXP_QUEUED;
2920 }
2921
2922 tim_ioctl_send_reply(q, mp, ackmp);
2923 tp->tim_iocsave = NULL;
2924 tp->tim_saved_prim = -1;
2925 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2926 TI_CAP_RECVD | CAP_WANTS_INFO);
2927 }
2928
2929 /*
2930 * Send the TPI message carried in an ioctl message downstream, saving the
2931 * original ioctl header and TPI message type.  Should be called from the write side only.
2932 */
2933 static void
2934 tim_send_ioctl_tpi_msg(queue_t *q, mblk_t *mp, struct tim_tim *tp,
2935 struct iocblk *iocb)
2936 {
2937 mblk_t *tmp;
2938 int ioc_cmd = iocb->ioc_cmd;
2939
2940 ASSERT(q != NULL && mp != NULL && tp != NULL);
2941 ASSERT(q == WR(q));
2942 ASSERT(mp->b_cont != NULL);
2943
2944 tp->tim_iocsave = mp;
2945 tmp = mp->b_cont;
2946
2947 mp->b_cont = NULL;
2948 tp->tim_flags |= WAITIOCACK;
2949 tp->tim_saved_prim = ((union T_primitives *)tmp->b_rptr)->type;
2950
2951 /*
2952 * For TI_GETINFO, the attached message is a T_INFO_REQ
2953 * For TI_SYNC, we generate the T_INFO_REQ message above
2954 * For TI_CAPABILITY the attached message is either
2955 * T_CAPABILITY_REQ or T_INFO_REQ.
2956 * Of the possible TPI request messages,
2957 * T_INFO_REQ/T_CAPABILITY_REQ messages are M_PCPROTO; the rest
2958 * are M_PROTO.
2959 */
2960 if (ioc_cmd == TI_GETINFO || ioc_cmd == TI_SYNC ||
2961 ioc_cmd == TI_CAPABILITY) {
2962 tmp->b_datap->db_type = M_PCPROTO;
2963 } else {
2964 tmp->b_datap->db_type = M_PROTO;
2965 }
2966
2967 /* Verify credentials in STREAM */
2968 ASSERT(iocb->ioc_cr == NULL || iocb->ioc_cr == DB_CRED(tmp));
2969
2970 ASSERT(DB_CRED(tmp) != NULL);
2971
2972 TILOG("timodwproc: sending down %d\n", tp->tim_saved_prim);
2973 putnext(q, tmp);
2974 }
2975
2976 static void
2977 tim_clear_peer(struct tim_tim *tp)
2978 {
2979 mutex_enter(&tp->tim_mutex);
2980 if (tp->tim_peercred != NULL) {
2981 crfree(tp->tim_peercred);
2982 tp->tim_peercred = NULL;
2983 }
2984 tp->tim_peerlen = 0;
2985 mutex_exit(&tp->tim_mutex);
2986 }
2987