1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 /* All Rights Reserved */
27
28
29 /*
30 * Transport Interface Library cooperating module - issue 2
31 */
32
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/stream.h>
36 #include <sys/stropts.h>
37 #include <sys/strsubr.h>
38 #define _SUN_TPI_VERSION 2
39 #include <sys/tihdr.h>
40 #include <sys/timod.h>
41 #include <sys/suntpi.h>
42 #include <sys/debug.h>
43 #include <sys/strlog.h>
44 #include <sys/errno.h>
45 #include <sys/cred.h>
46 #include <sys/cmn_err.h>
47 #include <sys/kmem.h>
48 #include <sys/sysmacros.h>
49 #include <sys/ddi.h>
50 #include <sys/sunddi.h>
51 #include <sys/strsun.h>
52 #include <c2/audit.h>
53
54 /*
55 * This is the loadable module wrapper.
56 */
57 #include <sys/conf.h>
58 #include <sys/modctl.h>
59
60 static struct streamtab timinfo;
61
62 static struct fmodsw fsw = {
63 "timod",
64 &timinfo,
65 D_MTQPAIR | D_MP,
66 };
67
68 /*
69 * Module linkage information for the kernel.
70 */
71
72 static struct modlstrmod modlstrmod = {
73 &mod_strmodops, "transport interface str mod", &fsw
74 };
75
76 static struct modlinkage modlinkage = {
77 MODREV_1, &modlstrmod, NULL
78 };
79
80 static krwlock_t tim_list_rwlock;
81
82 /*
83 * This module keeps track of the capabilities of the underlying transport.
84 * Information is persistent across module invocations (open/close). Currently it
85 * remembers whether the underlying transport supports the TI_GET{MY,PEER}NAME
86 * ioctls and the T_CAPABILITY_REQ message. This module either passes these
87 * ioctls/messages to the transport or emulates them when the transport doesn't
88 * understand them.
89 *
90 * It is assumed that the transport supports T_CAPABILITY_REQ when timod receives
91 * T_CAPABILITY_ACK from the transport. There is no current standard describing
92 * transport behaviour when it receives an unknown message type, so the following
93 * reactions are expected and handled:
94 *
95 * 1) Transport drops the unknown T_CAPABILITY_REQ message type. In this case timod
96 *    will wait for tcap_wait time and assume that the transport doesn't provide
97 *    this message type. T_CAPABILITY_REQ should never travel over the wire, so the
98 *    timeout value should only take into consideration internal processing time
99 *    for the message. From the user's standpoint this may mean that an application
100 *    will hang for TCAP_WAIT time in the kernel the first time this message is
101 *    used with some particular transport (e.g. TCP/IP) during system uptime.
102 *
103 * 2) Transport responds with T_ERROR_ACK specifying T_CAPABILITY_REQ as the
104 *    original message type. In this case it is assumed that the transport doesn't
105 *    support it (which may not always be true - some transports return
106 *    T_ERROR_ACK in other cases, like lack of system memory).
107 *
108 * 3) Transport responds with M_ERROR, effectively shutting down the
109 *    stream. Unfortunately there is no standard way to pass the reason for an
110 *    M_ERROR message back to the caller, so it is assumed that if M_ERROR was
111 *    sent in response to a T_CAPABILITY_REQ message, the transport doesn't
112 *    support it.
113 *
114 * It is possible under certain circumstances that timod will incorrectly assume
115 * that the underlying transport doesn't provide the T_CAPABILITY_REQ message type.
116 * In this "worst-case" scenario timod will emulate its functionality by itself and
117 * will provide only the TC1_INFO capability. All other bits in the CAP_bits1 field
118 * are cleared. TC1_INFO is emulated by sending T_INFO_REQ down to the transport
119 * provider.
120 */
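
/*
 * A rough sketch of the detection state machine described above, using the
 * tri-state tpi_capability value maintained by the handling code later in
 * this file (illustrative only, not a separate implementation):
 *
 *	tpi_capability == PI_DONTKNOW:
 *	    pass T_CAPABILITY_REQ down and start a tim_tcap_wait timer;
 *	        T_CAPABILITY_ACK arrives        -> tpi_capability = PI_YES
 *	        M_ERROR arrives / timer expires -> tpi_capability = PI_NO
 *	tpi_capability == PI_YES: always pass T_CAPABILITY_REQ downstream.
 *	tpi_capability == PI_NO:  emulate T_CAPABILITY_ACK (TC1_INFO only).
 */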
121
122 /*
123 * Notes about locking:
124 *
125 * tim_list_rwlock protects the list of tim_tim structures itself. When this
126 * lock is held, the list itself is stable, but the contents of the entries
127 * themselves might not be.
128 *
129 * The rest of the members are generally protected by D_MTQPAIR, which
130 * specifies a default exclusive inner perimeter. If you're looking at
131 * q->q_ptr, then it's stable.
132 *
133 * There's one exception to this rule: tim_peer{maxlen,len,name}. These members
134 * are touched without entering the associated STREAMS perimeter because we
135 * get the pointer via tim_findlink() rather than q_ptr. These are protected
136 * by tim_mutex instead. If you don't hold that lock, don't look at them.
137 *
138 * (It would be possible to separate out the 'set by T_CONN_RES' cases from the
139 * others, but there appears to be no reason to do so.)
140 */
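
/*
 * A minimal sketch (mirroring the rule above) of how tim_peer{maxlen,len,name}
 * are meant to be touched from code that reached the instance via
 * tim_findlink() rather than q_ptr; the T_OK_ACK/T_CONN_RES handling below
 * follows this pattern:
 *
 *	mutex_enter(&ntp->tim_mutex);
 *	... read or update ntp->tim_peername / tim_peerlen / tim_peermaxlen ...
 *	mutex_exit(&ntp->tim_mutex);
 */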
141 struct tim_tim {
142 uint32_t tim_flags;
143 t_uscalar_t tim_backlog;
144 mblk_t *tim_iocsave;
145 t_scalar_t tim_mymaxlen;
146 t_scalar_t tim_mylen;
147 caddr_t tim_myname;
148 t_scalar_t tim_peermaxlen;
149 t_scalar_t tim_peerlen;
150 caddr_t tim_peername;
151 cred_t *tim_peercred;
152 mblk_t *tim_consave;
153 bufcall_id_t tim_wbufcid;
154 bufcall_id_t tim_rbufcid;
155 timeout_id_t tim_wtimoutid;
156 timeout_id_t tim_rtimoutid;
157 /* Protected by the global tim_list_rwlock for all instances */
158 struct tim_tim *tim_next;
159 struct tim_tim **tim_ptpn;
160 t_uscalar_t tim_acceptor;
161 t_scalar_t tim_saved_prim; /* Primitive from message */
162 /* part of ioctl. */
163 timeout_id_t tim_tcap_timoutid; /* For T_CAP_REQ timeout */
164 tpi_provinfo_t *tim_provinfo; /* Transport description */
165 kmutex_t tim_mutex; /* protect tim_peer* */
166 pid_t tim_cpid;
167 };
168
169
170 /*
171 * Local flags used with the tim_flags field in the instance structure of
172 * type 'struct tim_tim' declared above.
173 * Historical note:
174 * These constants were previously declared in a
175 * very messed up namespace in timod.h.
176 *
177 * There may be 3 states for transport:
178 *
179 * 1) It provides T_CAPABILITY_REQ
180 * 2) It does not provide T_CAPABILITY_REQ
181 * 3) It is not known yet whether transport provides T_CAPABILITY_REQ or not.
182 *
183 * It is assumed that the underlying transport either provides
184 * T_CAPABILITY_REQ or not, and that this does not change during the
185 * system lifetime.
186 *
187 */
188 #define PEEK_RDQ_EXPIND 0x0001 /* look for expinds on stream rd queues */
189 #define WAITIOCACK 0x0002 /* waiting for info for ioctl act */
190 #define CLTS 0x0004 /* connectionless transport */
191 #define COTS 0x0008 /* connection-oriented transport */
192 #define CONNWAIT 0x0010 /* waiting for connect confirmation */
193 #define LOCORDREL 0x0020 /* local end has orderly released */
194 #define REMORDREL 0x0040 /* remote end has orderly released */
195 #define NAMEPROC 0x0080 /* processing a NAME ioctl */
196 #define DO_MYNAME 0x0100 /* timod handles TI_GETMYNAME */
197 #define DO_PEERNAME 0x0200 /* timod handles TI_GETPEERNAME */
198 #define TI_CAP_RECVD 0x0400 /* TI_CAPABILITY received */
199 #define CAP_WANTS_INFO 0x0800 /* TI_CAPABILITY has TC1_INFO set */
200 #define WAIT_IOCINFOACK 0x1000 /* T_INFO_REQ generated from ioctl */
201 #define WAIT_CONNRESACK 0x2000 /* waiting for T_OK_ACK to T_CONN_RES */
202
203
204 /* Debugging facilities */
205 /*
206 * Logging needed for debugging timod should only appear in a DEBUG kernel.
207 */
208 #ifdef DEBUG
209 #define TILOG(msg, arg) tilog((msg), (arg))
210 #define TILOGP(msg, arg) tilogp((msg), (arg))
211 #else
212 #define TILOG(msg, arg)
213 #define TILOGP(msg, arg)
214 #endif
215
216
217 /*
218 * Sleep timeout for T_CAPABILITY_REQ. This message never travels across the
219 * network, so the timeout value only needs to be long enough to cover internal
220 * processing time.
221 */
222 clock_t tim_tcap_wait = 2;
223
224 /* Sleep timeout in tim_recover() */
225 #define TIMWAIT (1*hz)
226 /* Sleep timeout in tim_ioctl_retry() 0.2 seconds */
227 #define TIMIOCWAIT (200*hz/1000)
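
/*
 * Both values above are expressed in clock ticks: hz is the number of ticks
 * per second, so (1*hz) is one second and (200*hz/1000) is 0.2 seconds
 * regardless of the configured tick rate.
 */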
228
229 /*
230 * Return values for ti_doname().
231 */
232 #define DONAME_FAIL 0 /* failing ioctl (done) */
233 #define DONAME_DONE 1 /* done processing */
234 #define DONAME_CONT 2 /* continue processing (not done yet) */
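
/*
 * Callers treat DONAME_CONT as "name processing still in progress" and clear
 * the NAMEPROC flag for any other return value. A minimal sketch of the
 * calling convention used later in this file:
 *
 *	tp->tim_flags |= NAMEPROC;
 *	if (ti_doname(WR(q), mp) != DONAME_CONT)
 *		tp->tim_flags &= ~NAMEPROC;
 */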
235
236 /*
237 * Function prototypes
238 */
239 static int ti_doname(queue_t *, mblk_t *);
240 static int ti_expind_on_rdqueues(queue_t *);
241 static void tim_ioctl_send_reply(queue_t *, mblk_t *, mblk_t *);
242 static void tim_send_ioc_error_ack(queue_t *, struct tim_tim *, mblk_t *);
243 static void tim_tcap_timer(void *);
244 static void tim_tcap_genreply(queue_t *, struct tim_tim *);
245 static void tim_send_reply(queue_t *, mblk_t *, struct tim_tim *, t_scalar_t);
246 static void tim_answer_ti_sync(queue_t *, mblk_t *, struct tim_tim *,
247 mblk_t *, uint32_t);
248 static void tim_send_ioctl_tpi_msg(queue_t *, mblk_t *, struct tim_tim *,
249 struct iocblk *);
250 static void tim_clear_peer(struct tim_tim *);
251
252 int
253 _init(void)
254 {
255 int error;
256
257 rw_init(&tim_list_rwlock, NULL, RW_DRIVER, NULL);
258 error = mod_install(&modlinkage);
259 if (error != 0) {
260 rw_destroy(&tim_list_rwlock);
261 return (error);
262 }
263
264 return (0);
265 }
266
267 int
268 _fini(void)
269 {
270 int error;
271
272 error = mod_remove(&modlinkage);
273 if (error != 0)
274 return (error);
275 rw_destroy(&tim_list_rwlock);
276 return (0);
277 }
278
279 int
280 _info(struct modinfo *modinfop)
281 {
282 return (mod_info(&modlinkage, modinfop));
283 }
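
/*
 * For reference, timod is normally pushed onto a transport endpoint's stream
 * by the userland TLI/XTI library (e.g. from t_open()) rather than by hand.
 * A hand-rolled userland equivalent would look roughly like the following
 * (illustrative sketch only, not part of this module):
 *
 *	#include <fcntl.h>
 *	#include <stropts.h>
 *
 *	int fd = open("/dev/tcp", O_RDWR);
 *	if (fd != -1 && ioctl(fd, I_PUSH, "timod") == -1)
 *		... handle the failure ...
 */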
284
285
286 /*
287 * Hash list for all instances. Used to find tim_tim structure based on
288 * ACCEPTOR_id in T_CONN_RES. Protected by tim_list_rwlock.
289 */
290 #define TIM_HASH_SIZE 256
291 #ifdef _ILP32
292 #define TIM_HASH(id) (((uintptr_t)(id) >> 8) % TIM_HASH_SIZE)
293 #else
294 #define TIM_HASH(id) ((uintptr_t)(id) % TIM_HASH_SIZE)
295 #endif /* _ILP32 */
296 static struct tim_tim *tim_hash[TIM_HASH_SIZE];
297 int tim_cnt = 0;
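
/*
 * A minimal sketch of how the hash is searched (the real tim_findlink() is
 * defined later in this file; illustrative only, with tim_list_rwlock held by
 * the caller):
 *
 *	for (tp = tim_hash[TIM_HASH(id)]; tp != NULL; tp = tp->tim_next)
 *		if (tp->tim_acceptor == id)
 *			break;
 */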
298
299 static void tilog(char *, t_scalar_t);
300 static void tilogp(char *, uintptr_t);
301 static mblk_t *tim_filladdr(queue_t *, mblk_t *, boolean_t);
302 static void tim_addlink(struct tim_tim *);
303 static void tim_dellink(struct tim_tim *);
304 static struct tim_tim *tim_findlink(t_uscalar_t);
305 static void tim_recover(queue_t *, mblk_t *, t_scalar_t);
306 static void tim_ioctl_retry(queue_t *);
307
308 int dotilog = 0;
309
310 #define TIMOD_ID 3
311
312 static int timodopen(queue_t *, dev_t *, int, int, cred_t *);
313 static int timodclose(queue_t *, int, cred_t *);
314 static void timodwput(queue_t *, mblk_t *);
315 static void timodrput(queue_t *, mblk_t *);
316 static void timodrsrv(queue_t *);
317 static void timodwsrv(queue_t *);
318 static int timodrproc(queue_t *, mblk_t *);
319 static int timodwproc(queue_t *, mblk_t *);
320
321 /* stream data structure definitions */
322
323 static struct module_info timod_info =
324 {TIMOD_ID, "timod", 0, INFPSZ, 512, 128};
325 static struct qinit timodrinit = {
326 (int (*)())timodrput,
327 (int (*)())timodrsrv,
328 timodopen,
329 timodclose,
330 nulldev,
331 &timod_info,
332 NULL
333 };
334 static struct qinit timodwinit = {
335 (int (*)())timodwput,
336 (int (*)())timodwsrv,
337 timodopen,
338 timodclose,
339 nulldev,
340 &timod_info,
341 NULL
342 };
343 static struct streamtab timinfo = { &timodrinit, &timodwinit, NULL, NULL };
344
345 /*
346 * timodopen - open routine gets called when the module gets pushed
347 * onto the stream.
348 */
349 /*ARGSUSED*/
350 static int
351 timodopen(
352 queue_t *q,
353 dev_t *devp,
354 int flag,
355 int sflag,
356 cred_t *crp)
357 {
358 struct tim_tim *tp;
359 struct stroptions *sop;
360 mblk_t *bp;
361
362 ASSERT(q != NULL);
363
364 if (q->q_ptr) {
365 return (0);
366 }
367
368 if ((bp = allocb(sizeof (struct stroptions), BPRI_MED)) == 0)
369 return (ENOMEM);
370
371 tp = kmem_zalloc(sizeof (struct tim_tim), KM_SLEEP);
372
373 tp->tim_cpid = -1;
374 tp->tim_saved_prim = -1;
375
376 mutex_init(&tp->tim_mutex, NULL, MUTEX_DEFAULT, NULL);
377
378 q->q_ptr = (caddr_t)tp;
379 WR(q)->q_ptr = (caddr_t)tp;
380
381 tilogp("timodopen: Allocated for tp %lx\n", (uintptr_t)tp);
382 tilogp("timodopen: Allocated for q %lx\n", (uintptr_t)q);
383
384 /* Must be done before tpi_findprov and _ILP32 q_next walk below */
385 qprocson(q);
386
387 tp->tim_provinfo = tpi_findprov(q);
388
389 /*
390 * Defer allocation of the buffers for the local address and
391 * the peer's address until we need them.
392	 * Assume that timod has to handle getname until we hear
393	 * an iocack from the transport provider or we know that
394	 * the transport provider doesn't understand it.
395 */
396 if (tp->tim_provinfo->tpi_myname != PI_YES) {
397 TILOG("timodopen: setting DO_MYNAME\n", 0);
398 tp->tim_flags |= DO_MYNAME;
399 }
400
401 if (tp->tim_provinfo->tpi_peername != PI_YES) {
402 TILOG("timodopen: setting DO_PEERNAME\n", 0);
403 tp->tim_flags |= DO_PEERNAME;
404 }
405
406 #ifdef _ILP32
407 {
408 queue_t *driverq;
409
410 /*
411 * Find my driver's read queue (for T_CONN_RES handling)
412 */
413 driverq = WR(q);
414 while (SAMESTR(driverq))
415 driverq = driverq->q_next;
416
417 tp->tim_acceptor = (t_uscalar_t)RD(driverq);
418 }
419 #else
420 tp->tim_acceptor = (t_uscalar_t)getminor(*devp);
421 #endif /* _ILP32 */
422
423 /*
424 * Add this one to the list.
425 */
426 tim_addlink(tp);
427
428 /*
429 * Send M_SETOPTS to stream head to make sure M_PCPROTO messages
430 * are not flushed. This prevents application deadlocks.
431 */
432 bp->b_datap->db_type = M_SETOPTS;
433 bp->b_wptr += sizeof (struct stroptions);
434 sop = (struct stroptions *)bp->b_rptr;
435 sop->so_flags = SO_READOPT;
436 sop->so_readopt = RFLUSHPCPROT;
437
438 putnext(q, bp);
439
440 return (0);
441 }
442
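/*
 * tim_timer() and tim_buffer() are the qtimeout() and qbufcall() callbacks
 * used by the allocation-recovery path (see tim_recover(), defined later in
 * this file); each simply clears the pending id for the affected side and
 * re-enables the queue so the stalled message is retried from the service
 * procedure.
 */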
443 static void
444 tim_timer(void *arg)
445 {
446 queue_t *q = arg;
447 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
448
449 ASSERT(tp);
450
451 if (q->q_flag & QREADR) {
452 ASSERT(tp->tim_rtimoutid);
453 tp->tim_rtimoutid = 0;
454 } else {
455 ASSERT(tp->tim_wtimoutid);
456 tp->tim_wtimoutid = 0;
457 }
458 enableok(q);
459 qenable(q);
460 }
461
462 static void
463 tim_buffer(void *arg)
464 {
465 queue_t *q = arg;
466 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
467
468 ASSERT(tp);
469
470 if (q->q_flag & QREADR) {
471 ASSERT(tp->tim_rbufcid);
472 tp->tim_rbufcid = 0;
473 } else {
474 ASSERT(tp->tim_wbufcid);
475 tp->tim_wbufcid = 0;
476 }
477 enableok(q);
478 qenable(q);
479 }
480
481 /*
482 * timodclose - This routine gets called when the module gets popped
483 * off of the stream.
484 */
485 /*ARGSUSED*/
486 static int
487 timodclose(
488 queue_t *q,
489 int flag,
490 cred_t *crp)
491 {
492 struct tim_tim *tp;
493 mblk_t *mp;
494 mblk_t *nmp;
495
496 ASSERT(q != NULL);
497
498 tp = (struct tim_tim *)q->q_ptr;
499 q->q_ptr = NULL;
500
501 ASSERT(tp != NULL);
502
503 tilogp("timodclose: Entered for tp %lx\n", (uintptr_t)tp);
504 tilogp("timodclose: Entered for q %lx\n", (uintptr_t)q);
505
506 qprocsoff(q);
507 tim_dellink(tp);
508
509 /*
510 * Cancel any outstanding bufcall
511 * or timeout requests.
512 */
513 if (tp->tim_wbufcid) {
514 qunbufcall(q, tp->tim_wbufcid);
515 tp->tim_wbufcid = 0;
516 }
517 if (tp->tim_rbufcid) {
518 qunbufcall(q, tp->tim_rbufcid);
519 tp->tim_rbufcid = 0;
520 }
521 if (tp->tim_wtimoutid) {
522 (void) quntimeout(q, tp->tim_wtimoutid);
523 tp->tim_wtimoutid = 0;
524 }
525 if (tp->tim_rtimoutid) {
526 (void) quntimeout(q, tp->tim_rtimoutid);
527 tp->tim_rtimoutid = 0;
528 }
529
530 if (tp->tim_tcap_timoutid != 0) {
531 (void) quntimeout(q, tp->tim_tcap_timoutid);
532 tp->tim_tcap_timoutid = 0;
533 }
534
535 if (tp->tim_iocsave != NULL)
536 freemsg(tp->tim_iocsave);
537 mp = tp->tim_consave;
538 while (mp) {
539 nmp = mp->b_next;
540 mp->b_next = NULL;
541 freemsg(mp);
542 mp = nmp;
543 }
544 ASSERT(tp->tim_mymaxlen >= 0);
545 if (tp->tim_mymaxlen != 0)
546 kmem_free(tp->tim_myname, (size_t)tp->tim_mymaxlen);
547 ASSERT(tp->tim_peermaxlen >= 0);
548 if (tp->tim_peermaxlen != 0)
549 kmem_free(tp->tim_peername, (size_t)tp->tim_peermaxlen);
550
551 q->q_ptr = WR(q)->q_ptr = NULL;
552
553 mutex_destroy(&tp->tim_mutex);
554
555 if (tp->tim_peercred != NULL)
556 crfree(tp->tim_peercred);
557
558 kmem_free(tp, sizeof (struct tim_tim));
559
560 return (0);
561 }
562
563 /*
564 * timodrput - Module read put procedure. This is called from
565 * the module, driver, or stream head upstream/downstream.
566 * Handles M_FLUSH, M_DATA and some M_PROTO (T_DATA_IND,
567 * and T_UNITDATA_IND) messages. All others are queued to
568 * be handled by the service procedures.
569 */
570 static void
571 timodrput(queue_t *q, mblk_t *mp)
572 {
573 union T_primitives *pptr;
574
575 /*
576 * During flow control and other instances when messages
577 * are on queue, queue up a non high priority message
578 */
579 if (q->q_first != 0 && mp->b_datap->db_type < QPCTL) {
580 (void) putq(q, mp);
581 return;
582 }
583
584 /*
585 * Inline processing of data (to avoid additional procedure call).
586 * Rest is handled in timodrproc.
587 */
588
589 switch (mp->b_datap->db_type) {
590 case M_DATA:
591 if (bcanputnext(q, mp->b_band))
592 putnext(q, mp);
593 else
594 (void) putq(q, mp);
595 break;
596 case M_PROTO:
597 case M_PCPROTO:
598 if (MBLKL(mp) < sizeof (t_scalar_t)) {
599 if (mp->b_datap->db_type == M_PCPROTO ||
600 bcanputnext(q, mp->b_band)) {
601 putnext(q, mp);
602 } else {
603 (void) putq(q, mp);
604 }
605 break;
606 }
607 pptr = (union T_primitives *)mp->b_rptr;
608 switch (pptr->type) {
609 case T_EXDATA_IND:
610 case T_DATA_IND:
611 case T_UNITDATA_IND:
612 if (bcanputnext(q, mp->b_band))
613 putnext(q, mp);
614 else
615 (void) putq(q, mp);
616 break;
617 default:
618 (void) timodrproc(q, mp);
619 break;
620 }
621 break;
622 default:
623 (void) timodrproc(q, mp);
624 break;
625 }
626 }
627
628 /*
629 * timodrsrv - Module read queue service procedure. This is called when
630 * messages are placed on an empty queue, when high priority
631 * messages are placed on the queue, and when flow control
632 * restrictions subside. This code used to be included in a
633 * put procedure, but it was moved to a service procedure
634 * because several points were added where memory allocation
635 * could fail, and there is no reasonable recovery mechanism
636 * from the put procedure.
637 */
638 /*ARGSUSED*/
639 static void
640 timodrsrv(queue_t *q)
641 {
642 mblk_t *mp;
643 struct tim_tim *tp;
644
645 ASSERT(q != NULL);
646
647 tp = (struct tim_tim *)q->q_ptr;
648 if (!tp)
649 return;
650
651 while ((mp = getq(q)) != NULL) {
652 if (timodrproc(q, mp)) {
653 /*
654 * timodrproc did a putbq - stop processing
655 * messages.
656 */
657 return;
658 }
659 }
660 }
661
662 /*
663 * Perform common processing when a T_CAPABILITY_ACK or T_INFO_ACK
664 * arrives. Set the queue properties and adjust the tim_flags according
665 * to the service type.
666 */
667 static void
668 timodprocessinfo(queue_t *q, struct tim_tim *tp, struct T_info_ack *tia)
669 {
670 TILOG("timodprocessinfo: strqset(%d)\n", tia->TIDU_size);
671 (void) strqset(q, QMAXPSZ, 0, tia->TIDU_size);
672 (void) strqset(OTHERQ(q), QMAXPSZ, 0, tia->TIDU_size);
673
674 if ((tia->SERV_type == T_COTS) || (tia->SERV_type == T_COTS_ORD))
675 tp->tim_flags = (tp->tim_flags & ~CLTS) | COTS;
676 else if (tia->SERV_type == T_CLTS)
677 tp->tim_flags = (tp->tim_flags & ~COTS) | CLTS;
678 }
679
680 static int
681 timodrproc(queue_t *q, mblk_t *mp)
682 {
683 uint32_t auditing = AU_AUDITING();
684 union T_primitives *pptr;
685 struct tim_tim *tp;
686 struct iocblk *iocbp;
687 mblk_t *nbp;
688 size_t blen;
689
690 tp = (struct tim_tim *)q->q_ptr;
691
692 switch (mp->b_datap->db_type) {
693 default:
694 putnext(q, mp);
695 break;
696
697 case M_ERROR:
698 TILOG("timodrproc: Got M_ERROR, flags = %x\n", tp->tim_flags);
699 /*
700 * There is no specified standard response for a driver when it
701 * receives an unknown message type, and M_ERROR is one
702 * possibility. If we send T_CAPABILITY_REQ down and the transport
703 * provider responds with M_ERROR, we assume that it doesn't
704 * understand this message type. This assumption may be
705 * sometimes incorrect (transport may reply with M_ERROR for
706 * some other reason) but there is no way for us to distinguish
707 * between different cases. In the worst case timod and everyone
708 * else sharing global transport description with it may end up
709 * emulating T_CAPABILITY_REQ.
710 */
711
712 /*
713 * Check that we are waiting for T_CAPABILITY_ACK and
714 * T_CAPABILITY_REQ is not implemented by transport or emulated
715 * by timod.
716 */
717 if ((tp->tim_provinfo->tpi_capability == PI_DONTKNOW) &&
718 ((tp->tim_flags & TI_CAP_RECVD) != 0)) {
719 /*
720 * Good chances that this transport doesn't provide
721 * T_CAPABILITY_REQ. Mark this information permanently
722 * for the module + transport combination.
723 */
724 PI_PROVLOCK(tp->tim_provinfo);
725 if (tp->tim_provinfo->tpi_capability == PI_DONTKNOW)
726 tp->tim_provinfo->tpi_capability = PI_NO;
727 PI_PROVUNLOCK(tp->tim_provinfo);
728 if (tp->tim_tcap_timoutid != 0) {
729 (void) quntimeout(q, tp->tim_tcap_timoutid);
730 tp->tim_tcap_timoutid = 0;
731 }
732 }
733 putnext(q, mp);
734 break;
735 case M_DATA:
736 if (!bcanputnext(q, mp->b_band)) {
737 (void) putbq(q, mp);
738 return (1);
739 }
740 putnext(q, mp);
741 break;
742
743 case M_PROTO:
744 case M_PCPROTO:
745 blen = MBLKL(mp);
746 if (blen < sizeof (t_scalar_t)) {
747 /*
748 * Note: it's not actually possible to get
749 * here with db_type M_PCPROTO, because
750 * timodrput has already checked MBLKL, and
751 * thus the assertion below. If the length
752 * was too short, then the message would have
753 * already been putnext'd, and would thus
754 * never appear here. Just the same, the code
755 * below handles the impossible case since
756 * it's easy to do and saves future
757 * maintainers from unfortunate accidents.
758 */
759 ASSERT(mp->b_datap->db_type == M_PROTO);
760 if (mp->b_datap->db_type == M_PROTO &&
761 !bcanputnext(q, mp->b_band)) {
762 (void) putbq(q, mp);
763 return (1);
764 }
765 putnext(q, mp);
766 break;
767 }
768
769 pptr = (union T_primitives *)mp->b_rptr;
770 switch (pptr->type) {
771 default:
772
773 if (auditing)
774 audit_sock(T_UNITDATA_IND, q, mp, TIMOD_ID);
775 putnext(q, mp);
776 break;
777
778 case T_ERROR_ACK:
779 /* Restore db_type - recover() might have changed it */
780 mp->b_datap->db_type = M_PCPROTO;
781 if (blen < sizeof (struct T_error_ack)) {
782 putnext(q, mp);
783 break;
784 }
785
786 tilog("timodrproc: Got T_ERROR_ACK, flags = %x\n",
787 tp->tim_flags);
788
789 if ((tp->tim_flags & WAIT_CONNRESACK) &&
790 tp->tim_saved_prim == pptr->error_ack.ERROR_prim) {
791 tp->tim_flags &=
792 ~(WAIT_CONNRESACK | WAITIOCACK);
793 freemsg(tp->tim_iocsave);
794 tp->tim_iocsave = NULL;
795 tp->tim_saved_prim = -1;
796 putnext(q, mp);
797 } else if (tp->tim_flags & WAITIOCACK) {
798 tim_send_ioc_error_ack(q, tp, mp);
799 } else {
800 putnext(q, mp);
801 }
802 break;
803
804 case T_OK_ACK:
805 if (blen < sizeof (pptr->ok_ack)) {
806 mp->b_datap->db_type = M_PCPROTO;
807 putnext(q, mp);
808 break;
809 }
810
811 tilog("timodrproc: Got T_OK_ACK\n", 0);
812
813 if (pptr->ok_ack.CORRECT_prim == T_UNBIND_REQ)
814 tp->tim_mylen = 0;
815
816 if ((tp->tim_flags & WAIT_CONNRESACK) &&
817 tp->tim_saved_prim == pptr->ok_ack.CORRECT_prim) {
818 struct T_conn_res *resp;
819 struct T_conn_ind *indp;
820 struct tim_tim *ntp;
821 caddr_t ptr;
822
823 rw_enter(&tim_list_rwlock, RW_READER);
824 resp = (struct T_conn_res *)
825 tp->tim_iocsave->b_rptr;
826 ntp = tim_findlink(resp->ACCEPTOR_id);
827 if (ntp == NULL)
828 goto cresackout;
829
830 mutex_enter(&ntp->tim_mutex);
831 if (ntp->tim_peercred != NULL)
832 crfree(ntp->tim_peercred);
833 ntp->tim_peercred =
834 msg_getcred(tp->tim_iocsave->b_cont,
835 &ntp->tim_cpid);
836 if (ntp->tim_peercred != NULL)
837 crhold(ntp->tim_peercred);
838
839 if (!(ntp->tim_flags & DO_PEERNAME)) {
840 mutex_exit(&ntp->tim_mutex);
841 goto cresackout;
842 }
843
844 indp = (struct T_conn_ind *)
845 tp->tim_iocsave->b_cont->b_rptr;
846 /* true as message is put on list */
847 ASSERT(indp->SRC_length >= 0);
848
849 if (indp->SRC_length > ntp->tim_peermaxlen) {
850 ptr = kmem_alloc(indp->SRC_length,
851 KM_NOSLEEP);
852 if (ptr == NULL) {
853 mutex_exit(&ntp->tim_mutex);
854 rw_exit(&tim_list_rwlock);
855 tilog("timodrproc: kmem_alloc "
856 "failed, attempting "
857 "recovery\n", 0);
858 tim_recover(q, mp,
859 indp->SRC_length);
860 return (1);
861 }
862 if (ntp->tim_peermaxlen > 0)
863 kmem_free(ntp->tim_peername,
864 ntp->tim_peermaxlen);
865 ntp->tim_peername = ptr;
866 ntp->tim_peermaxlen = indp->SRC_length;
867 }
868 ntp->tim_peerlen = indp->SRC_length;
869 ptr = (caddr_t)indp + indp->SRC_offset;
870 bcopy(ptr, ntp->tim_peername, ntp->tim_peerlen);
871
872 mutex_exit(&ntp->tim_mutex);
873
874 cresackout:
875 rw_exit(&tim_list_rwlock);
876 tp->tim_flags &=
877 ~(WAIT_CONNRESACK | WAITIOCACK);
878 freemsg(tp->tim_iocsave);
879 tp->tim_iocsave = NULL;
880 tp->tim_saved_prim = -1;
881 }
882
883 tim_send_reply(q, mp, tp, pptr->ok_ack.CORRECT_prim);
884 break;
885
886 case T_BIND_ACK: {
887 struct T_bind_ack *ackp =
888 (struct T_bind_ack *)mp->b_rptr;
889
890 /* Restore db_type - recover() might have changed it */
891 mp->b_datap->db_type = M_PCPROTO;
892 if (blen < sizeof (*ackp)) {
893 putnext(q, mp);
894 break;
895 }
896
897 /* save negotiated backlog */
898 tp->tim_backlog = ackp->CONIND_number;
899
900 if (((tp->tim_flags & WAITIOCACK) == 0) ||
901 ((tp->tim_saved_prim != O_T_BIND_REQ) &&
902 (tp->tim_saved_prim != T_BIND_REQ))) {
903 putnext(q, mp);
904 break;
905 }
906 ASSERT(tp->tim_iocsave != NULL);
907
908 if (tp->tim_flags & DO_MYNAME) {
909 caddr_t p;
910
911 if (ackp->ADDR_length < 0 ||
912 mp->b_rptr + ackp->ADDR_offset +
913 ackp->ADDR_length > mp->b_wptr) {
914 putnext(q, mp);
915 break;
916 }
917 if (ackp->ADDR_length > tp->tim_mymaxlen) {
918 p = kmem_alloc(ackp->ADDR_length,
919 KM_NOSLEEP);
920 if (p == NULL) {
921 tilog("timodrproc: kmem_alloc "
922 "failed attempt recovery",
923 0);
924
925 tim_recover(q, mp,
926 ackp->ADDR_length);
927 return (1);
928 }
929 ASSERT(tp->tim_mymaxlen >= 0);
930 if (tp->tim_mymaxlen != 0) {
931 kmem_free(tp->tim_myname,
932 tp->tim_mymaxlen);
933 }
934 tp->tim_myname = p;
935 tp->tim_mymaxlen = ackp->ADDR_length;
936 }
937 tp->tim_mylen = ackp->ADDR_length;
938 bcopy(mp->b_rptr + ackp->ADDR_offset,
939 tp->tim_myname, tp->tim_mylen);
940 }
941 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
942 tp->tim_iocsave = NULL;
943 tp->tim_saved_prim = -1;
944 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
945 TI_CAP_RECVD | CAP_WANTS_INFO);
946 break;
947 }
948
949 case T_OPTMGMT_ACK:
950
951 tilog("timodrproc: Got T_OPTMGMT_ACK\n", 0);
952
953 /* Restore db_type - recover() might have changed it */
954 mp->b_datap->db_type = M_PCPROTO;
955
956 if (((tp->tim_flags & WAITIOCACK) == 0) ||
957 ((tp->tim_saved_prim != T_SVR4_OPTMGMT_REQ) &&
958 (tp->tim_saved_prim != T_OPTMGMT_REQ))) {
959 putnext(q, mp);
960 } else {
961 ASSERT(tp->tim_iocsave != NULL);
962 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
963 tp->tim_iocsave = NULL;
964 tp->tim_saved_prim = -1;
965 tp->tim_flags &= ~(WAITIOCACK |
966 WAIT_IOCINFOACK | TI_CAP_RECVD |
967 CAP_WANTS_INFO);
968 }
969 break;
970
971 case T_INFO_ACK: {
972 struct T_info_ack *tia = (struct T_info_ack *)pptr;
973
974 /* Restore db_type - recover() might have changed it */
975 mp->b_datap->db_type = M_PCPROTO;
976
977 if (blen < sizeof (*tia)) {
978 putnext(q, mp);
979 break;
980 }
981
982 tilog("timodrproc: Got T_INFO_ACK, flags = %x\n",
983 tp->tim_flags);
984
985 timodprocessinfo(q, tp, tia);
986
987 TILOG("timodrproc: flags = %x\n", tp->tim_flags);
988 if ((tp->tim_flags & WAITIOCACK) != 0) {
989 size_t expected_ack_size;
990 ssize_t deficit;
991 int ioc_cmd;
992 struct T_capability_ack *tcap;
993
994 /*
995 * The only case when T_INFO_ACK may be received back
996 * when we are waiting for ioctl to complete is when
997 * this ioctl sent T_INFO_REQ down.
998 */
999 if (!(tp->tim_flags & WAIT_IOCINFOACK)) {
1000 putnext(q, mp);
1001 break;
1002 }
1003 ASSERT(tp->tim_iocsave != NULL);
1004
1005 iocbp = (struct iocblk *)tp->tim_iocsave->b_rptr;
1006 ioc_cmd = iocbp->ioc_cmd;
1007
1008 /*
1009 * Was it sent from TI_CAPABILITY emulation?
1010 */
1011 if (ioc_cmd == TI_CAPABILITY) {
1012 struct T_info_ack saved_info;
1013
1014 /*
1015 * Perform sanity checks. The only case when we
1016 * send T_INFO_REQ from TI_CAPABILITY is when
1017 * timod emulates T_CAPABILITY_REQ and CAP_bits1
1018 * has TC1_INFO set.
1019 */
1020 if ((tp->tim_flags &
1021 (TI_CAP_RECVD | CAP_WANTS_INFO)) !=
1022 (TI_CAP_RECVD | CAP_WANTS_INFO)) {
1023 putnext(q, mp);
1024 break;
1025 }
1026
1027 TILOG("timodrproc: emulating TI_CAPABILITY/"
1028 "info\n", 0);
1029
1030 /* Save info & reuse mp for T_CAPABILITY_ACK */
1031 saved_info = *tia;
1032
1033 mp = tpi_ack_alloc(mp,
1034 sizeof (struct T_capability_ack),
1035 M_PCPROTO, T_CAPABILITY_ACK);
1036
1037 if (mp == NULL) {
1038 tilog("timodrproc: realloc failed, "
1039 "no recovery attempted\n", 0);
1040 return (1);
1041 }
1042
1043 /*
1044 * Copy T_INFO information into T_CAPABILITY_ACK
1045 */
1046 tcap = (struct T_capability_ack *)mp->b_rptr;
1047 tcap->CAP_bits1 = TC1_INFO;
1048 tcap->INFO_ack = saved_info;
1049 tp->tim_flags &= ~(WAITIOCACK |
1050 WAIT_IOCINFOACK | TI_CAP_RECVD |
1051 CAP_WANTS_INFO);
1052 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
1053 tp->tim_iocsave = NULL;
1054 tp->tim_saved_prim = -1;
1055 break;
1056 }
1057
1058 /*
1059 * The code for TI_SYNC/TI_GETINFO is left here only for
1060 * backward compatibility with statically linked old
1061 * applications. New TLI/XTI code should use
1062 * TI_CAPABILITY for getting transport info and should
1063 * not use TI_GETINFO/TI_SYNC for this purpose.
1064 */
1065
1066 /*
1067 * make sure the message sent back is the size of
1068 * the "expected ack"
1069 * For TI_GETINFO, expected ack size is
1070 * sizeof (T_info_ack)
1071 * For TI_SYNC, expected ack size is
1072 * sizeof (struct ti_sync_ack);
1073 */
1074 if (ioc_cmd != TI_GETINFO && ioc_cmd != TI_SYNC) {
1075 putnext(q, mp);
1076 break;
1077 }
1078
1079 expected_ack_size =
1080 sizeof (struct T_info_ack); /* TI_GETINFO */
1081 if (iocbp->ioc_cmd == TI_SYNC) {
1082 expected_ack_size = 2 * sizeof (uint32_t) +
1083 sizeof (struct ti_sync_ack);
1084 }
1085 deficit = expected_ack_size - blen;
1086
1087 if (deficit != 0) {
1088 if (mp->b_datap->db_lim - mp->b_wptr <
1089 deficit) {
1090 mblk_t *tmp = allocb(expected_ack_size,
1091 BPRI_HI);
1092 if (tmp == NULL) {
1093 ASSERT(MBLKSIZE(mp) >=
1094 sizeof (struct T_error_ack));
1095
1096 tilog("timodrproc: allocb failed no "
1097 "recovery attempt\n", 0);
1098
1099 mp->b_rptr = mp->b_datap->db_base;
1100 pptr = (union T_primitives *)
1101 mp->b_rptr;
1102 pptr->error_ack.ERROR_prim = T_INFO_REQ;
1103 pptr->error_ack.TLI_error = TSYSERR;
1104 pptr->error_ack.UNIX_error = EAGAIN;
1105 pptr->error_ack.PRIM_type = T_ERROR_ACK;
1106 mp->b_datap->db_type = M_PCPROTO;
1107 tim_send_ioc_error_ack(q, tp, mp);
1108 break;
1109 } else {
1110 bcopy(mp->b_rptr, tmp->b_rptr, blen);
1111 tmp->b_wptr += blen;
1112 pptr = (union T_primitives *)
1113 tmp->b_rptr;
1114 freemsg(mp);
1115 mp = tmp;
1116 }
1117 }
1118 }
1119 /*
1120 * We now have "mp" which has enough space for an
1121 * appropriate ack and contains struct T_info_ack
1122 * that the transport provider returned. We now
1123 * fill it with whatever else is needed to fulfill
1124 * the TI_SYNC ioctl's needs, as necessary.
1125 */
1126 if (iocbp->ioc_cmd == TI_SYNC) {
1127 /*
1128 * Assumes struct T_info_ack is first embedded
1129 * type in struct ti_sync_ack so it is
1130 * automatically there.
1131 */
1132 struct ti_sync_ack *tsap =
1133 (struct ti_sync_ack *)mp->b_rptr;
1134
1135 /*
1136 * tsap->tsa_qlen needs to be set only if
1137 * TSRF_QLEN_REQ flag is set, but for
1138 * compatibility with statically linked
1139 * applications it is set here regardless of the
1140 * flag since old XTI library expected it to be
1141 * set.
1142 */
1143 tsap->tsa_qlen = tp->tim_backlog;
1144 tsap->tsa_flags = 0x0; /* initialize to clear */
1145 if (tp->tim_flags & PEEK_RDQ_EXPIND) {
1146 /*
1147 * Request to peek for EXPIND in
1148 * rcvbuf.
1149 */
1150 if (ti_expind_on_rdqueues(q)) {
1151 /*
1152 * Expedited data is
1153 * queued on the stream
1154 * read side
1155 */
1156 tsap->tsa_flags |=
1157 TSAF_EXP_QUEUED;
1158 }
1159 tp->tim_flags &=
1160 ~PEEK_RDQ_EXPIND;
1161 }
1162 mp->b_wptr += 2*sizeof (uint32_t);
1163 }
1164 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
1165 tp->tim_iocsave = NULL;
1166 tp->tim_saved_prim = -1;
1167 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
1168 TI_CAP_RECVD | CAP_WANTS_INFO);
1169 break;
1170 }
1171 }
1172
1173 putnext(q, mp);
1174 break;
1175
1176 case T_ADDR_ACK:
1177 tilog("timodrproc: Got T_ADDR_ACK\n", 0);
1178 tim_send_reply(q, mp, tp, T_ADDR_REQ);
1179 break;
1180
1181 case T_CONN_IND: {
1182 struct T_conn_ind *tcip =
1183 (struct T_conn_ind *)mp->b_rptr;
1184
1185 tilog("timodrproc: Got T_CONN_IND\n", 0);
1186
1187 if (blen >= sizeof (*tcip) &&
1188 MBLKIN(mp, tcip->SRC_offset, tcip->SRC_length)) {
1189 if (((nbp = dupmsg(mp)) != NULL) ||
1190 ((nbp = copymsg(mp)) != NULL)) {
1191 nbp->b_next = tp->tim_consave;
1192 tp->tim_consave = nbp;
1193 } else {
1194 tim_recover(q, mp,
1195 (t_scalar_t)sizeof (mblk_t));
1196 return (1);
1197 }
1198 }
1199 if (auditing)
1200 audit_sock(T_CONN_IND, q, mp, TIMOD_ID);
1201 putnext(q, mp);
1202 break;
1203 }
1204
1205 case T_CONN_CON:
1206 mutex_enter(&tp->tim_mutex);
1207 if (tp->tim_peercred != NULL)
1208 crfree(tp->tim_peercred);
1209 tp->tim_peercred = msg_getcred(mp, &tp->tim_cpid);
1210 if (tp->tim_peercred != NULL)
1211 crhold(tp->tim_peercred);
1212 mutex_exit(&tp->tim_mutex);
1213
1214 tilog("timodrproc: Got T_CONN_CON\n", 0);
1215
1216 tp->tim_flags &= ~CONNWAIT;
1217 putnext(q, mp);
1218 break;
1219
1220 case T_DISCON_IND: {
1221 struct T_discon_ind *disp;
1222 struct T_conn_ind *conp;
1223 mblk_t *pbp = NULL;
1224
1225 if (q->q_first != 0)
1226 tilog("timodrproc: T_DISCON_IND - flow control\n", 0);
1227
1228 if (blen < sizeof (*disp)) {
1229 putnext(q, mp);
1230 break;
1231 }
1232
1233 disp = (struct T_discon_ind *)mp->b_rptr;
1234
1235 tilog("timodrproc: Got T_DISCON_IND Reason: %d\n",
1236 disp->DISCON_reason);
1237
1238 tp->tim_flags &= ~(CONNWAIT|LOCORDREL|REMORDREL);
1239 tim_clear_peer(tp);
1240 for (nbp = tp->tim_consave; nbp; nbp = nbp->b_next) {
1241 conp = (struct T_conn_ind *)nbp->b_rptr;
1242 if (conp->SEQ_number == disp->SEQ_number)
1243 break;
1244 pbp = nbp;
1245 }
1246 if (nbp) {
1247 if (pbp)
1248 pbp->b_next = nbp->b_next;
1249 else
1250 tp->tim_consave = nbp->b_next;
1251 nbp->b_next = NULL;
1252 freemsg(nbp);
1253 }
1254 putnext(q, mp);
1255 break;
1256 }
1257
1258 case T_ORDREL_IND:
1259
1260 tilog("timodrproc: Got T_ORDREL_IND\n", 0);
1261
1262 if (tp->tim_flags & LOCORDREL) {
1263 tp->tim_flags &= ~(LOCORDREL|REMORDREL);
1264 tim_clear_peer(tp);
1265 } else {
1266 tp->tim_flags |= REMORDREL;
1267 }
1268 putnext(q, mp);
1269 break;
1270
1271 case T_EXDATA_IND:
1272 case T_DATA_IND:
1273 case T_UNITDATA_IND:
1274 if (pptr->type == T_EXDATA_IND)
1275 tilog("timodrproc: Got T_EXDATA_IND\n", 0);
1276
1277 if (!bcanputnext(q, mp->b_band)) {
1278 (void) putbq(q, mp);
1279 return (1);
1280 }
1281 putnext(q, mp);
1282 break;
1283
1284 case T_CAPABILITY_ACK: {
1285 struct T_capability_ack *tca;
1286
1287 if (blen < sizeof (*tca)) {
1288 putnext(q, mp);
1289 break;
1290 }
1291
1292 /* This transport supports T_CAPABILITY_REQ */
1293 tilog("timodrproc: Got T_CAPABILITY_ACK\n", 0);
1294
1295 PI_PROVLOCK(tp->tim_provinfo);
1296 if (tp->tim_provinfo->tpi_capability != PI_YES)
1297 tp->tim_provinfo->tpi_capability = PI_YES;
1298 PI_PROVUNLOCK(tp->tim_provinfo);
1299
1300 /* Reset possible pending timeout */
1301 if (tp->tim_tcap_timoutid != 0) {
1302 (void) quntimeout(q, tp->tim_tcap_timoutid);
1303 tp->tim_tcap_timoutid = 0;
1304 }
1305
1306 tca = (struct T_capability_ack *)mp->b_rptr;
1307
1308 if (tca->CAP_bits1 & TC1_INFO)
1309 timodprocessinfo(q, tp, &tca->INFO_ack);
1310
1311 tim_send_reply(q, mp, tp, T_CAPABILITY_REQ);
1312 }
1313 break;
1314 }
1315 break;
1316
1317 case M_FLUSH:
1318
1319 tilog("timodrproc: Got M_FLUSH\n", 0);
1320
1321 if (*mp->b_rptr & FLUSHR) {
1322 if (*mp->b_rptr & FLUSHBAND)
1323 flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
1324 else
1325 flushq(q, FLUSHDATA);
1326 }
1327 putnext(q, mp);
1328 break;
1329
1330 case M_IOCACK:
1331 iocbp = (struct iocblk *)mp->b_rptr;
1332
1333 tilog("timodrproc: Got M_IOCACK\n", 0);
1334
1335 if (iocbp->ioc_cmd == TI_GETMYNAME) {
1336
1337 /*
1338 * Transport provider supports this ioctl,
1339 * so I don't have to.
1340 */
1341 if ((tp->tim_flags & DO_MYNAME) != 0) {
1342 tp->tim_flags &= ~DO_MYNAME;
1343 PI_PROVLOCK(tp->tim_provinfo);
1344 tp->tim_provinfo->tpi_myname = PI_YES;
1345 PI_PROVUNLOCK(tp->tim_provinfo);
1346 }
1347
1348 ASSERT(tp->tim_mymaxlen >= 0);
1349 if (tp->tim_mymaxlen != 0) {
1350 kmem_free(tp->tim_myname, (size_t)tp->tim_mymaxlen);
1351 tp->tim_myname = NULL;
1352 tp->tim_mymaxlen = 0;
1353 }
1354 /* tim_iocsave may already be overwritten. */
1355 if (tp->tim_saved_prim == -1) {
1356 freemsg(tp->tim_iocsave);
1357 tp->tim_iocsave = NULL;
1358 }
1359 } else if (iocbp->ioc_cmd == TI_GETPEERNAME) {
1360 boolean_t clearit;
1361
1362 /*
1363 * Transport provider supports this ioctl,
1364 * so I don't have to.
1365 */
1366 if ((tp->tim_flags & DO_PEERNAME) != 0) {
1367 tp->tim_flags &= ~DO_PEERNAME;
1368 PI_PROVLOCK(tp->tim_provinfo);
1369 tp->tim_provinfo->tpi_peername = PI_YES;
1370 PI_PROVUNLOCK(tp->tim_provinfo);
1371 }
1372
1373 mutex_enter(&tp->tim_mutex);
1374 ASSERT(tp->tim_peermaxlen >= 0);
1375 clearit = tp->tim_peermaxlen != 0;
1376 if (clearit) {
1377 kmem_free(tp->tim_peername, tp->tim_peermaxlen);
1378 tp->tim_peername = NULL;
1379 tp->tim_peermaxlen = 0;
1380 tp->tim_peerlen = 0;
1381 }
1382 mutex_exit(&tp->tim_mutex);
1383 if (clearit) {
1384 mblk_t *bp;
1385
1386 bp = tp->tim_consave;
1387 while (bp != NULL) {
1388 nbp = bp->b_next;
1389 bp->b_next = NULL;
1390 freemsg(bp);
1391 bp = nbp;
1392 }
1393 tp->tim_consave = NULL;
1394 }
1395 /* tim_iocsave may already be overwritten. */
1396 if (tp->tim_saved_prim == -1) {
1397 freemsg(tp->tim_iocsave);
1398 tp->tim_iocsave = NULL;
1399 }
1400 }
1401 putnext(q, mp);
1402 break;
1403
1404 case M_IOCNAK:
1405
1406 tilog("timodrproc: Got M_IOCNAK\n", 0);
1407
1408 iocbp = (struct iocblk *)mp->b_rptr;
1409 if (((iocbp->ioc_cmd == TI_GETMYNAME) ||
1410 (iocbp->ioc_cmd == TI_GETPEERNAME)) &&
1411 ((iocbp->ioc_error == EINVAL) || (iocbp->ioc_error == 0))) {
1412 PI_PROVLOCK(tp->tim_provinfo);
1413 if (iocbp->ioc_cmd == TI_GETMYNAME) {
1414 if (tp->tim_provinfo->tpi_myname == PI_DONTKNOW)
1415 tp->tim_provinfo->tpi_myname = PI_NO;
1416 } else if (iocbp->ioc_cmd == TI_GETPEERNAME) {
1417 if (tp->tim_provinfo->tpi_peername == PI_DONTKNOW)
1418 tp->tim_provinfo->tpi_peername = PI_NO;
1419 }
1420 PI_PROVUNLOCK(tp->tim_provinfo);
1421 /* tim_iocsave may already be overwritten. */
1422 if ((tp->tim_iocsave != NULL) &&
1423 (tp->tim_saved_prim == -1)) {
1424 freemsg(mp);
1425 mp = tp->tim_iocsave;
1426 tp->tim_iocsave = NULL;
1427 tp->tim_flags |= NAMEPROC;
1428 if (ti_doname(WR(q), mp) != DONAME_CONT) {
1429 tp->tim_flags &= ~NAMEPROC;
1430 }
1431 break;
1432 }
1433 }
1434 putnext(q, mp);
1435 break;
1436 }
1437
1438 return (0);
1439 }
1440
1441 /*
1442 * timodwput - Module write put procedure. This is called from
1443 * the module, driver, or stream head upstream/downstream.
1444 * Handles M_FLUSH, M_DATA and some M_PROTO (T_DATA_REQ,
1445 * and T_UNITDATA_REQ) messages. All others are queued to
1446 * be handled by the service procedures.
1447 */
1448
1449 static void
1450 timodwput(queue_t *q, mblk_t *mp)
1451 {
1452 union T_primitives *pptr;
1453 struct tim_tim *tp;
1454 struct iocblk *iocbp;
1455
1456 /*
1457 * Enqueue normal-priority messages if our queue already
1458 * holds some messages for deferred processing but don't
1459 * enqueue those M_IOCTLs which will result in an
1460 * M_PCPROTO (i.e., high-priority) message being created.
1461 */
1462 if (q->q_first != 0 && mp->b_datap->db_type < QPCTL) {
1463 if (mp->b_datap->db_type == M_IOCTL) {
1464 iocbp = (struct iocblk *)mp->b_rptr;
1465 switch (iocbp->ioc_cmd) {
1466 default:
1467 (void) putq(q, mp);
1468 return;
1469
1470 case TI_GETINFO:
1471 case TI_SYNC:
1472 case TI_CAPABILITY:
1473 break;
1474 }
1475 } else {
1476 (void) putq(q, mp);
1477 return;
1478 }
1479 }
1480 /*
1481 * Inline processing of data (to avoid additional procedure call).
1482 * Rest is handled in timodwproc.
1483 */
1484
1485 switch (mp->b_datap->db_type) {
1486 case M_DATA:
1487 tp = (struct tim_tim *)q->q_ptr;
1488 ASSERT(tp);
1489 if (tp->tim_flags & CLTS) {
1490 mblk_t *tmp;
1491
1492 if ((tmp = tim_filladdr(q, mp, B_FALSE)) == NULL) {
1493 (void) putq(q, mp);
1494 break;
1495 } else {
1496 mp = tmp;
1497 }
1498 }
1499 if (bcanputnext(q, mp->b_band))
1500 putnext(q, mp);
1501 else
1502 (void) putq(q, mp);
1503 break;
1504 case M_PROTO:
1505 case M_PCPROTO:
1506 pptr = (union T_primitives *)mp->b_rptr;
1507 switch (pptr->type) {
1508 case T_UNITDATA_REQ:
1509 tp = (struct tim_tim *)q->q_ptr;
1510 ASSERT(tp);
1511 if (tp->tim_flags & CLTS) {
1512 mblk_t *tmp;
1513
1514 tmp = tim_filladdr(q, mp, B_FALSE);
1515 if (tmp == NULL) {
1516 (void) putq(q, mp);
1517 break;
1518 } else {
1519 mp = tmp;
1520 }
1521 }
1522 if (bcanputnext(q, mp->b_band))
1523 putnext(q, mp);
1524 else
1525 (void) putq(q, mp);
1526 break;
1527
1528 case T_DATA_REQ:
1529 case T_EXDATA_REQ:
1530 if (bcanputnext(q, mp->b_band))
1531 putnext(q, mp);
1532 else
1533 (void) putq(q, mp);
1534 break;
1535 default:
1536 (void) timodwproc(q, mp);
1537 break;
1538 }
1539 break;
1540 default:
1541 (void) timodwproc(q, mp);
1542 break;
1543 }
1544 }
1545 /*
1546 * timodwsrv - Module write queue service procedure.
1547 * This is called when messages are placed on an empty queue,
1548 * when high priority messages are placed on the queue, and
1549 * when flow control restrictions subside. This code used to
1550 * be included in a put procedure, but it was moved to a
1551 * service procedure because several points were added where
1552 * memory allocation could fail, and there is no reasonable
1553 * recovery mechanism from the put procedure.
1554 */
1555 static void
1556 timodwsrv(queue_t *q)
1557 {
1558 mblk_t *mp;
1559
1560 ASSERT(q != NULL);
1561 if (q->q_ptr == NULL)
1562 return;
1563
1564 while ((mp = getq(q)) != NULL) {
1565 if (timodwproc(q, mp)) {
1566 /*
1567 * timodwproc did a putbq - stop processing
1568 * messages.
1569 */
1570 return;
1571 }
1572 }
1573 }
1574
1575 /*
1576 * Common routine to process write side messages
1577 */
1578
1579 static int
1580 timodwproc(queue_t *q, mblk_t *mp)
1581 {
1582 union T_primitives *pptr;
1583 struct tim_tim *tp;
1584 uint32_t auditing = AU_AUDITING();
1585 mblk_t *tmp;
1586 struct iocblk *iocbp;
1587 int error;
1588
1589 tp = (struct tim_tim *)q->q_ptr;
1590
1591 switch (mp->b_datap->db_type) {
1592 default:
1593 putnext(q, mp);
1594 break;
1595
1596 case M_DATA:
1597 if (tp->tim_flags & CLTS) {
1598 if ((tmp = tim_filladdr(q, mp, B_TRUE)) == NULL) {
1599 return (1);
1600 } else {
1601 mp = tmp;
1602 }
1603 }
1604 if (!bcanputnext(q, mp->b_band)) {
1605 (void) putbq(q, mp);
1606 return (1);
1607 }
1608 putnext(q, mp);
1609 break;
1610
1611 case M_IOCTL:
1612
1613 iocbp = (struct iocblk *)mp->b_rptr;
1614 TILOG("timodwproc: Got M_IOCTL(%d)\n", iocbp->ioc_cmd);
1615
1616 ASSERT(MBLKL(mp) == sizeof (struct iocblk));
1617
1618 /*
1619 * TPI requires that we await the response to a previously sent message
1620 * before handling another, so put this one back on the head of the queue.
1621 * Since putbq() may see QWANTR unset when called from the
1622 * service procedure, the queue must be explicitly scheduled
1623 * for service, as no backenable will occur for this case.
1624 * tim_ioctl_retry() sets a timer to handle the qenable.
1625 */
1626 if (tp->tim_flags & WAITIOCACK) {
1627 TILOG("timodwproc: putbq M_IOCTL(%d)\n",
1628 iocbp->ioc_cmd);
1629 (void) putbq(q, mp);
1630 /* Called from timodwsrv() and messages on queue */
1631 if (!(q->q_flag & QWANTR))
1632 tim_ioctl_retry(q);
1633 return (1);
1634 }
1635
1636 switch (iocbp->ioc_cmd) {
1637 default:
1638 putnext(q, mp);
1639 break;
1640
1641 case _I_GETPEERCRED:
1642 if ((tp->tim_flags & COTS) == 0) {
1643 miocnak(q, mp, 0, ENOTSUP);
1644 } else {
1645 mblk_t *cmp = mp->b_cont;
1646 k_peercred_t *kp = NULL;
1647
1648 mutex_enter(&tp->tim_mutex);
1649 if (cmp != NULL &&
1650 iocbp->ioc_flag == IOC_NATIVE &&
1651 (tp->tim_flags &
1652 (CONNWAIT|LOCORDREL|REMORDREL)) == 0 &&
1653 tp->tim_peercred != NULL &&
1654 DB_TYPE(cmp) == M_DATA &&
1655 MBLKL(cmp) == sizeof (k_peercred_t)) {
1656 kp = (k_peercred_t *)cmp->b_rptr;
1657 crhold(kp->pc_cr = tp->tim_peercred);
1658 kp->pc_cpid = tp->tim_cpid;
1659 }
1660 mutex_exit(&tp->tim_mutex);
1661 if (kp != NULL)
1662 miocack(q, mp, sizeof (*kp), 0);
1663 else
1664 miocnak(q, mp, 0, ENOTCONN);
1665 }
1666 break;
1667 case TI_BIND:
1668 case TI_UNBIND:
1669 case TI_OPTMGMT:
1670 case TI_GETADDRS:
1671 TILOG("timodwproc: TI_{BIND|UNBIND|OPTMGMT|GETADDRS}"
1672 "\n", 0);
1673
1674 /*
1675 * We know that tim_send_ioctl_tpi_msg() is only
1676 * going to examine the `type' field, so we only
1677 * check that we can access that much data.
1678 */
1679 error = miocpullup(mp, sizeof (t_scalar_t));
1680 if (error != 0) {
1681 miocnak(q, mp, 0, error);
1682 break;
1683 }
1684 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1685 break;
1686
1687 case TI_GETINFO:
1688 TILOG("timodwproc: TI_GETINFO\n", 0);
1689 error = miocpullup(mp, sizeof (struct T_info_req));
1690 if (error != 0) {
1691 miocnak(q, mp, 0, error);
1692 break;
1693 }
1694 tp->tim_flags |= WAIT_IOCINFOACK;
1695 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1696 break;
1697
1698 case TI_SYNC: {
1699 mblk_t *tsr_mp;
1700 struct ti_sync_req *tsr;
1701 uint32_t tsr_flags;
1702
1703 error = miocpullup(mp, sizeof (struct ti_sync_req));
1704 if (error != 0) {
1705 miocnak(q, mp, 0, error);
1706 break;
1707 }
1708
1709 tsr_mp = mp->b_cont;
1710 tsr = (struct ti_sync_req *)tsr_mp->b_rptr;
1711 TILOG("timodwproc: TI_SYNC(%x)\n", tsr->tsr_flags);
1712
1713 /*
1714 * Save out the value of tsr_flags, in case we
1715 * reallocb() tsr_mp (below).
1716 */
1717 tsr_flags = tsr->tsr_flags;
1718 if ((tsr_flags & TSRF_INFO_REQ) == 0) {
1719 mblk_t *ack_mp = reallocb(tsr_mp,
1720 sizeof (struct ti_sync_ack), 0);
1721
1722 /* Can reply immediately. */
1723 mp->b_cont = NULL;
1724 if (ack_mp == NULL) {
1725 tilog("timodwproc: allocb failed no "
1726 "recovery attempt\n", 0);
1727 freemsg(tsr_mp);
1728 miocnak(q, mp, 0, ENOMEM);
1729 } else {
1730 tim_answer_ti_sync(q, mp, tp,
1731 ack_mp, tsr_flags);
1732 }
1733 break;
1734 }
1735
1736 /*
1737 * This code is retained for compatibility with
1738 * old statically linked applications. New code
1739 * should use TI_CAPABILITY for all TPI
1740 * information and should not use TSRF_INFO_REQ
1741 * flag.
1742 *
1743 * Defer the processing necessary to the rput procedure,
1744 * as we need to get information from the transport
1745 * driver. Set flags that will tell the read
1746 * side the work needed on this request.
1747 */
1748
1749 if (tsr_flags & TSRF_IS_EXP_IN_RCVBUF)
1750 tp->tim_flags |= PEEK_RDQ_EXPIND;
1751
1752 /*
1753 * Convert message to a T_INFO_REQ message; relies
1754 * on sizeof (struct ti_sync_req) >= sizeof (struct
1755 * T_info_req).
1756 */
1757 ASSERT(MBLKL(tsr_mp) >= sizeof (struct T_info_req));
1758
1759 ((struct T_info_req *)tsr_mp->b_rptr)->PRIM_type =
1760 T_INFO_REQ;
1761 tsr_mp->b_wptr = tsr_mp->b_rptr +
1762 sizeof (struct T_info_req);
1763 tp->tim_flags |= WAIT_IOCINFOACK;
1764 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1765 }
1766 break;
1767
1768 case TI_CAPABILITY: {
1769 mblk_t *tcsr_mp;
1770 struct T_capability_req *tcr;
1771
1772 error = miocpullup(mp, sizeof (*tcr));
1773 if (error != 0) {
1774 miocnak(q, mp, 0, error);
1775 break;
1776 }
1777
1778 tcsr_mp = mp->b_cont;
1779 tcr = (struct T_capability_req *)tcsr_mp->b_rptr;
1780 TILOG("timodwproc: TI_CAPABILITY(CAP_bits1 = %x)\n",
1781 tcr->CAP_bits1);
1782
1783 if (tcr->PRIM_type != T_CAPABILITY_REQ) {
1784 TILOG("timodwproc: invalid msg type %d\n",
1785 tcr->PRIM_type);
1786 miocnak(q, mp, 0, EPROTO);
1787 break;
1788 }
1789
1790 switch (tp->tim_provinfo->tpi_capability) {
1791 case PI_YES:
1792 /* Just send T_CAPABILITY_REQ down */
1793 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1794 break;
1795
1796 case PI_DONTKNOW:
1797 /*
1798 * It is unknown yet whether transport provides
1799 * T_CAPABILITY_REQ or not. Send message down
1800 * and wait for reply.
1801 */
1802
1803 ASSERT(tp->tim_tcap_timoutid == 0);
1804 if ((tcr->CAP_bits1 & TC1_INFO) == 0) {
1805 tp->tim_flags |= TI_CAP_RECVD;
1806 } else {
1807 tp->tim_flags |= (TI_CAP_RECVD |
1808 CAP_WANTS_INFO);
1809 }
1810
1811 tp->tim_tcap_timoutid = qtimeout(q,
1812 tim_tcap_timer, q, tim_tcap_wait * hz);
1813 tim_send_ioctl_tpi_msg(q, mp, tp, iocbp);
1814 break;
1815
1816 case PI_NO:
1817 /*
1818 * Transport doesn't support T_CAPABILITY_REQ.
1819 * Either reply immediately or send T_INFO_REQ
1820 * if needed.
1821 */
1822 if ((tcr->CAP_bits1 & TC1_INFO) != 0) {
1823 tp->tim_flags |= (TI_CAP_RECVD |
1824 CAP_WANTS_INFO | WAIT_IOCINFOACK);
1825 TILOG("timodwproc: sending down "
1826 "T_INFO_REQ, flags = %x\n",
1827 tp->tim_flags);
1828
1829 /*
1830 * Generate T_INFO_REQ message and send
1831 * it down
1832 */
1833 ((struct T_info_req *)tcsr_mp->b_rptr)->
1834 PRIM_type = T_INFO_REQ;
1835 tcsr_mp->b_wptr = tcsr_mp->b_rptr +
1836 sizeof (struct T_info_req);
1837 tim_send_ioctl_tpi_msg(q, mp, tp,
1838 iocbp);
1839 break;
1840 }
1841
1842
1843 /*
1844 * Can reply immediately. Just send back
1845 * T_CAPABILITY_ACK with CAP_bits1 set to 0.
1846 */
1847 mp->b_cont = tcsr_mp = tpi_ack_alloc(mp->b_cont,
1848 sizeof (struct T_capability_ack), M_PCPROTO,
1849 T_CAPABILITY_ACK);
1850
1851 if (tcsr_mp == NULL) {
1852 tilog("timodwproc: allocb failed no "
1853 "recovery attempt\n", 0);
1854 miocnak(q, mp, 0, ENOMEM);
1855 break;
1856 }
1857
1858 tp->tim_flags &= ~(WAITIOCACK | TI_CAP_RECVD |
1859 WAIT_IOCINFOACK | CAP_WANTS_INFO);
1860 ((struct T_capability_ack *)
1861 tcsr_mp->b_rptr)->CAP_bits1 = 0;
1862 tim_ioctl_send_reply(q, mp, tcsr_mp);
1863
1864 /*
1865 * It could happen when timod is awaiting ack
1866 * for TI_GETPEERNAME/TI_GETMYNAME.
1867 */
1868 if (tp->tim_iocsave != NULL) {
1869 freemsg(tp->tim_iocsave);
1870 tp->tim_iocsave = NULL;
1871 tp->tim_saved_prim = -1;
1872 }
1873 break;
1874
1875 default:
1876 cmn_err(CE_PANIC,
1877 "timodwproc: unknown tpi_capability value "
1878 "%d\n", tp->tim_provinfo->tpi_capability);
1879 break;
1880 }
1881 }
1882 break;
1883
1884 case TI_GETMYNAME:
1885
1886 tilog("timodwproc: Got TI_GETMYNAME\n", 0);
1887
1888 if (tp->tim_provinfo->tpi_myname == PI_YES) {
1889 putnext(q, mp);
1890 break;
1891 }
1892 goto getname;
1893
1894 case TI_GETPEERNAME:
1895
1896 tilog("timodwproc: Got TI_GETPEERNAME\n", 0);
1897
1898 if (tp->tim_provinfo->tpi_peername == PI_YES) {
1899 putnext(q, mp);
1900 break;
1901 }
1902 getname:
1903 if ((tmp = copymsg(mp)) == NULL) {
1904 tim_recover(q, mp, msgsize(mp));
1905 return (1);
1906 }
1907 /*
1908 * tim_iocsave may be non-NULL when timod is awaiting
1909 * ack for another TI_GETPEERNAME/TI_GETMYNAME.
1910 */
1911 freemsg(tp->tim_iocsave);
1912 tp->tim_iocsave = mp;
1913 tp->tim_saved_prim = -1;
1914 putnext(q, tmp);
1915 break;
1916 }
1917 break;
1918
1919 case M_IOCDATA:
1920
1921 if (tp->tim_flags & NAMEPROC) {
1922 if (ti_doname(q, mp) != DONAME_CONT) {
1923 tp->tim_flags &= ~NAMEPROC;
1924 }
1925 } else
1926 putnext(q, mp);
1927 break;
1928
1929 case M_PROTO:
1930 case M_PCPROTO:
1931 if (MBLKL(mp) < sizeof (t_scalar_t)) {
1932 merror(q, mp, EPROTO);
1933 return (1);
1934 }
1935
1936 pptr = (union T_primitives *)mp->b_rptr;
1937 switch (pptr->type) {
1938 default:
1939 putnext(q, mp);
1940 break;
1941
1942 case T_EXDATA_REQ:
1943 case T_DATA_REQ:
1944 if (pptr->type == T_EXDATA_REQ)
1945 tilog("timodwproc: Got T_EXDATA_REQ\n", 0);
1946
1947 if (!bcanputnext(q, mp->b_band)) {
1948 (void) putbq(q, mp);
1949 return (1);
1950 }
1951 putnext(q, mp);
1952 break;
1953
1954 case T_UNITDATA_REQ:
1955 if (tp->tim_flags & CLTS) {
1956 tmp = tim_filladdr(q, mp, B_TRUE);
1957 if (tmp == NULL) {
1958 return (1);
1959 } else {
1960 mp = tmp;
1961 }
1962 }
1963 if (auditing)
1964 audit_sock(T_UNITDATA_REQ, q, mp, TIMOD_ID);
1965 if (!bcanputnext(q, mp->b_band)) {
1966 (void) putbq(q, mp);
1967 return (1);
1968 }
1969 putnext(q, mp);
1970 break;
1971
1972 case T_CONN_REQ: {
1973 struct T_conn_req *reqp = (struct T_conn_req *)
1974 mp->b_rptr;
1975 void *p;
1976
1977 tilog("timodwproc: Got T_CONN_REQ\n", 0);
1978
1979 if (MBLKL(mp) < sizeof (struct T_conn_req)) {
1980 merror(q, mp, EPROTO);
1981 return (1);
1982 }
1983
1984 if (tp->tim_flags & DO_PEERNAME) {
1985 if (!MBLKIN(mp, reqp->DEST_offset,
1986 reqp->DEST_length)) {
1987 merror(q, mp, EPROTO);
1988 return (1);
1989 }
1990 ASSERT(reqp->DEST_length >= 0);
1991 mutex_enter(&tp->tim_mutex);
1992 if (reqp->DEST_length > tp->tim_peermaxlen) {
1993 p = kmem_alloc(reqp->DEST_length,
1994 KM_NOSLEEP);
1995 if (p == NULL) {
1996 mutex_exit(&tp->tim_mutex);
1997 tilog("timodwproc: kmem_alloc "
1998 "failed, attempting "
1999 "recovery\n", 0);
2000 tim_recover(q, mp,
2001 reqp->DEST_length);
2002 return (1);
2003 }
2004 if (tp->tim_peermaxlen)
2005 kmem_free(tp->tim_peername,
2006 tp->tim_peermaxlen);
2007 tp->tim_peername = p;
2008 tp->tim_peermaxlen = reqp->DEST_length;
2009 }
2010 tp->tim_peerlen = reqp->DEST_length;
2011 p = mp->b_rptr + reqp->DEST_offset;
2012 bcopy(p, tp->tim_peername, tp->tim_peerlen);
2013 mutex_exit(&tp->tim_mutex);
2014 }
2015 if (tp->tim_flags & COTS)
2016 tp->tim_flags |= CONNWAIT;
2017 if (auditing)
2018 audit_sock(T_CONN_REQ, q, mp, TIMOD_ID);
2019 putnext(q, mp);
2020 break;
2021 }
2022
2023 case O_T_CONN_RES:
2024 case T_CONN_RES: {
2025 struct T_conn_res *resp;
2026 struct T_conn_ind *indp;
2027 mblk_t *pmp = NULL;
2028 mblk_t *nbp;
2029
2030 if (MBLKL(mp) < sizeof (struct T_conn_res) ||
2031 (tp->tim_flags & WAITIOCACK)) {
2032 merror(q, mp, EPROTO);
2033 return (1);
2034 }
2035
2036 resp = (struct T_conn_res *)mp->b_rptr;
2037 for (tmp = tp->tim_consave; tmp != NULL;
2038 tmp = tmp->b_next) {
2039 indp = (struct T_conn_ind *)tmp->b_rptr;
2040 if (indp->SEQ_number == resp->SEQ_number)
2041 break;
2042 pmp = tmp;
2043 }
2044 if (tmp == NULL)
2045 goto cresout;
2046
2047 if ((nbp = dupb(mp)) == NULL &&
2048 (nbp = copyb(mp)) == NULL) {
2049 tim_recover(q, mp, msgsize(mp));
2050 return (1);
2051 }
2052
2053 if (pmp != NULL)
2054 pmp->b_next = tmp->b_next;
2055 else
2056 tp->tim_consave = tmp->b_next;
2057 tmp->b_next = NULL;
2058
2059 /*
2060 * Construct a list with:
2061 * nbp - copy of user's original request
2062 * tmp - the extracted T_conn_ind
2063 */
2064 nbp->b_cont = tmp;
2065 /*
2066 * tim_iocsave may be non-NULL when timod is awaiting
2067 * ack for TI_GETPEERNAME/TI_GETMYNAME.
2068 */
2069 freemsg(tp->tim_iocsave);
2070 tp->tim_iocsave = nbp;
2071 tp->tim_saved_prim = pptr->type;
2072 tp->tim_flags |= WAIT_CONNRESACK | WAITIOCACK;
2073
2074 cresout:
2075 putnext(q, mp);
2076 break;
2077 }
2078
2079 case T_DISCON_REQ: {
2080 struct T_discon_req *disp;
2081 struct T_conn_ind *conp;
2082 mblk_t *pmp = NULL;
2083
2084 if (MBLKL(mp) < sizeof (struct T_discon_req)) {
2085 merror(q, mp, EPROTO);
2086 return (1);
2087 }
2088
2089 disp = (struct T_discon_req *)mp->b_rptr;
2090 tp->tim_flags &= ~(CONNWAIT|LOCORDREL|REMORDREL);
2091 tim_clear_peer(tp);
2092
2093 /*
2094 * If we are already connected, there won't
2095 * be any messages on tim_consave.
2096 */
2097 for (tmp = tp->tim_consave; tmp; tmp = tmp->b_next) {
2098 conp = (struct T_conn_ind *)tmp->b_rptr;
2099 if (conp->SEQ_number == disp->SEQ_number)
2100 break;
2101 pmp = tmp;
2102 }
2103 if (tmp) {
2104 if (pmp)
2105 pmp->b_next = tmp->b_next;
2106 else
2107 tp->tim_consave = tmp->b_next;
2108 tmp->b_next = NULL;
2109 freemsg(tmp);
2110 }
2111 putnext(q, mp);
2112 break;
2113 }
2114
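/*
 * T_ORDREL_REQ: if the remote side has already released (REMORDREL is
 * set), this local release completes the orderly teardown, so clear both
 * flags and forget the cached peer name; otherwise just record that the
 * local side has released.
 */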
2115 case T_ORDREL_REQ:
2116 if (tp->tim_flags & REMORDREL) {
2117 tp->tim_flags &= ~(LOCORDREL|REMORDREL);
2118 tim_clear_peer(tp);
2119 } else {
2120 tp->tim_flags |= LOCORDREL;
2121 }
2122 putnext(q, mp);
2123 break;
2124
2125 case T_CAPABILITY_REQ:
2126 tilog("timodwproc: Got T_CAPABILITY_REQ\n", 0);
2127 /*
2128 * XXX: We may know at this point whether transport
2129 * provides T_CAPABILITY_REQ or not and we may utilise
2130 * this knowledge here.
2131 */
2132 putnext(q, mp);
2133 break;
2134 }
2135 break;
2136 case M_FLUSH:
2137
2138 tilog("timodwproc: Got M_FLUSH\n", 0);
2139
2140 if (*mp->b_rptr & FLUSHW) {
2141 if (*mp->b_rptr & FLUSHBAND)
2142 flushband(q, *(mp->b_rptr + 1), FLUSHDATA);
2143 else
2144 flushq(q, FLUSHDATA);
2145 }
2146 putnext(q, mp);
2147 break;
2148 }
2149
2150 return (0);
2151 }
2152
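/*
 * Debug tracing helpers controlled by the dotilog tunable: any nonzero
 * value logs via strlog(), bit 0x2 additionally echoes the message to the
 * console with cmn_err(), and bit 0x4 tags the strlog() entry with
 * SL_ERROR in addition to SL_TRACE.
 */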
2153 static void
2154 tilog(char *str, t_scalar_t arg)
2155 {
2156 if (dotilog) {
2157 if (dotilog & 2)
2158 cmn_err(CE_CONT, str, arg);
2159 if (dotilog & 4)
2160 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE | SL_ERROR,
2161 str, arg);
2162 else
2163 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE, str, arg);
2164 }
2165 }
2166
2167 static void
2168 tilogp(char *str, uintptr_t arg)
2169 {
2170 if (dotilog) {
2171 if (dotilog & 2)
2172 cmn_err(CE_CONT, str, arg);
2173 if (dotilog & 4)
2174 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE | SL_ERROR,
2175 str, arg);
2176 else
2177 (void) strlog(TIMOD_ID, -1, 0, SL_TRACE, str, arg);
2178 }
2179 }
2180
2181
2182 /*
2183 * Process the TI_GETNAME ioctl. If no name exists, return len = 0
2184 * in strbuf structures. The state transitions are determined by what
2185 * is hung off cq_private (cp_private) in the copyreq (copyresp) structure.
2186 * The high-level steps in the ioctl processing are as follows:
2187 *
2188 * 1) we receive a transparent M_IOCTL with the arg in the second message
2189 * block of the message.
2190 * 2) we send up an M_COPYIN request for the strbuf structure pointed to
2191 * by arg. The block containing arg is hung off cq_private.
2192 * 3) we receive an M_IOCDATA response with cp->cp_private->b_cont == NULL.
2193 * This means that the strbuf structure is found in the message block
2194 * mp->b_cont.
2195 * 4) we send up an M_COPYOUT request with the strbuf message hung off
2196 * cq_private->b_cont. The address we are copying to is strbuf.buf.
2197 * we set strbuf.len to 0 to indicate that we should copy the strbuf
2198 * structure the next time. The message mp->b_cont contains the
2199 * address info.
2200 * 5) we receive an M_IOCDATA with cp_private->b_cont != NULL and
2201 * strbuf.len == 0. Restore strbuf.len to either tp->tim_mylen or
2202 * tp->tim_peerlen.
2203 * 6) we send up an M_COPYOUT request with a copy of the strbuf message
2204 * hung off mp->b_cont. In the strbuf structure in the message hung
2205 * off cq_private->b_cont, we set strbuf.len to 0 and strbuf.maxlen
2206 * to 0. This means that the next step is to ACK the ioctl.
2207 * 7) we receive an M_IOCDATA message with cp_private->b_cont != NULL and
2208 * strbuf.len == 0 and strbuf.maxlen == 0. Free up cp_private and
2209 * send an M_IOCACK upstream, and we are done.
2210 *
2211 */
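/*
 * Illustrative, hypothetical user-level sketch (not part of this module) of
 * driving the transparent ioctl handled below, assuming <stropts.h> and
 * <sys/timod.h> are included and fd refers to a stream with timod pushed;
 * process_address() is just a stand-in for whatever the caller does with
 * the result:
 *
 *	struct strbuf sb;
 *	char addr[128];
 *
 *	sb.buf = addr;
 *	sb.maxlen = sizeof (addr);
 *	sb.len = 0;
 *	if (ioctl(fd, TI_GETMYNAME, &sb) == 0)
 *		process_address(sb.buf, sb.len);
 *
 * TI_GETPEERNAME is driven the same way and returns the peer's address.
 */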
2212 static int
2213 ti_doname(
2214 queue_t *q, /* queue message arrived at */
2215 mblk_t *mp) /* M_IOCTL or M_IOCDATA message only */
2216 {
2217 struct iocblk *iocp;
2218 struct copyreq *cqp;
2219 STRUCT_HANDLE(strbuf, sb);
2220 struct copyresp *csp;
2221 int ret;
2222 mblk_t *bp;
2223 struct tim_tim *tp = q->q_ptr;
2224 boolean_t getpeer;
2225
2226 switch (mp->b_datap->db_type) {
2227 case M_IOCTL:
2228 iocp = (struct iocblk *)mp->b_rptr;
2229 if ((iocp->ioc_cmd != TI_GETMYNAME) &&
2230 (iocp->ioc_cmd != TI_GETPEERNAME)) {
2231 tilog("ti_doname: bad M_IOCTL command\n", 0);
2232 miocnak(q, mp, 0, EINVAL);
2233 ret = DONAME_FAIL;
2234 break;
2235 }
2236 if ((iocp->ioc_count != TRANSPARENT)) {
2237 miocnak(q, mp, 0, EINVAL);
2238 ret = DONAME_FAIL;
2239 break;
2240 }
2241
2242 cqp = (struct copyreq *)mp->b_rptr;
2243 cqp->cq_private = mp->b_cont;
2244 cqp->cq_addr = (caddr_t)*(intptr_t *)mp->b_cont->b_rptr;
2245 mp->b_cont = NULL;
2246 cqp->cq_size = SIZEOF_STRUCT(strbuf, iocp->ioc_flag);
2247 cqp->cq_flag = 0;
2248 mp->b_datap->db_type = M_COPYIN;
2249 mp->b_wptr = mp->b_rptr + sizeof (struct copyreq);
2250 qreply(q, mp);
2251 ret = DONAME_CONT;
2252 break;
2253
2254 case M_IOCDATA:
2255 csp = (struct copyresp *)mp->b_rptr;
2256 iocp = (struct iocblk *)mp->b_rptr;
2257 cqp = (struct copyreq *)mp->b_rptr;
2258 if ((csp->cp_cmd != TI_GETMYNAME) &&
2259 (csp->cp_cmd != TI_GETPEERNAME)) {
2260 cmn_err(CE_WARN, "ti_doname: bad M_IOCDATA command\n");
2261 miocnak(q, mp, 0, EINVAL);
2262 ret = DONAME_FAIL;
2263 break;
2264 }
2265 if (csp->cp_rval) { /* error */
2266 freemsg(csp->cp_private);
2267 freemsg(mp);
2268 ret = DONAME_FAIL;
2269 break;
2270 }
2271 ASSERT(csp->cp_private != NULL);
2272 getpeer = csp->cp_cmd == TI_GETPEERNAME;
2273 if (getpeer)
2274 mutex_enter(&tp->tim_mutex);
2275 if (csp->cp_private->b_cont == NULL) { /* got strbuf */
2276 ASSERT(mp->b_cont);
2277 STRUCT_SET_HANDLE(sb, iocp->ioc_flag,
2278 (void *)mp->b_cont->b_rptr);
2279 if (getpeer) {
2280 if (tp->tim_peerlen == 0) {
2281 /* copy just strbuf */
2282 STRUCT_FSET(sb, len, 0);
2283 } else if (tp->tim_peerlen >
2284 STRUCT_FGET(sb, maxlen)) {
2285 mutex_exit(&tp->tim_mutex);
2286 miocnak(q, mp, 0, ENAMETOOLONG);
2287 ret = DONAME_FAIL;
2288 break;
2289 } else {
2290 /* copy buffer */
2291 STRUCT_FSET(sb, len, tp->tim_peerlen);
2292 }
2293 } else {
2294 if (tp->tim_mylen == 0) {
2295 /* copy just strbuf */
2296 STRUCT_FSET(sb, len, 0);
2297 } else if (tp->tim_mylen >
2298 STRUCT_FGET(sb, maxlen)) {
2299 freemsg(csp->cp_private);
2300 miocnak(q, mp, 0, ENAMETOOLONG);
2301 ret = DONAME_FAIL;
2302 break;
2303 } else {
2304 /* copy buffer */
2305 STRUCT_FSET(sb, len, tp->tim_mylen);
2306 }
2307 }
2308 csp->cp_private->b_cont = mp->b_cont;
2309 mp->b_cont = NULL;
2310 }
2311 STRUCT_SET_HANDLE(sb, iocp->ioc_flag,
2312 (void *)csp->cp_private->b_cont->b_rptr);
2313 if (STRUCT_FGET(sb, len) == 0) {
2314 /*
2315 * restore strbuf.len
2316 */
2317 if (getpeer)
2318 STRUCT_FSET(sb, len, tp->tim_peerlen);
2319 else
2320 STRUCT_FSET(sb, len, tp->tim_mylen);
2321
2322 if (getpeer)
2323 mutex_exit(&tp->tim_mutex);
2324 if (STRUCT_FGET(sb, maxlen) == 0) {
2325
2326 /*
2327 * ack the ioctl
2328 */
2329 freemsg(csp->cp_private);
2330 tim_ioctl_send_reply(q, mp, NULL);
2331 ret = DONAME_DONE;
2332 break;
2333 }
2334
2335 if ((bp = allocb(STRUCT_SIZE(sb), BPRI_MED)) == NULL) {
2336
2337 tilog(
2338 "ti_doname: allocb failed no recovery attempt\n", 0);
2339
2340 freemsg(csp->cp_private);
2341 miocnak(q, mp, 0, EAGAIN);
2342 ret = DONAME_FAIL;
2343 break;
2344 }
2345 bp->b_wptr += STRUCT_SIZE(sb);
2346 bcopy(STRUCT_BUF(sb), bp->b_rptr, STRUCT_SIZE(sb));
2347 cqp->cq_addr =
2348 (caddr_t)*(intptr_t *)csp->cp_private->b_rptr;
2349 cqp->cq_size = STRUCT_SIZE(sb);
2350 cqp->cq_flag = 0;
2351 mp->b_datap->db_type = M_COPYOUT;
2352 mp->b_cont = bp;
2353 STRUCT_FSET(sb, len, 0);
2354 STRUCT_FSET(sb, maxlen, 0); /* ack next time around */
2355 qreply(q, mp);
2356 ret = DONAME_CONT;
2357 break;
2358 }
2359
2360 /*
2361 * copy the address to the user
2362 */
2363 if ((bp = allocb((size_t)STRUCT_FGET(sb, len), BPRI_MED))
2364 == NULL) {
2365 if (getpeer)
2366 mutex_exit(&tp->tim_mutex);
2367
2368 tilog("ti_doname: allocb failed no recovery attempt\n",
2369 0);
2370
2371 freemsg(csp->cp_private);
2372 miocnak(q, mp, 0, EAGAIN);
2373 ret = DONAME_FAIL;
2374 break;
2375 }
2376 bp->b_wptr += STRUCT_FGET(sb, len);
2377 if (getpeer) {
2378 bcopy(tp->tim_peername, bp->b_rptr,
2379 STRUCT_FGET(sb, len));
2380 mutex_exit(&tp->tim_mutex);
2381 } else {
2382 bcopy(tp->tim_myname, bp->b_rptr, STRUCT_FGET(sb, len));
2383 }
2384 cqp->cq_addr = (caddr_t)STRUCT_FGETP(sb, buf);
2385 cqp->cq_size = STRUCT_FGET(sb, len);
2386 cqp->cq_flag = 0;
2387 mp->b_datap->db_type = M_COPYOUT;
2388 mp->b_cont = bp;
2389 STRUCT_FSET(sb, len, 0); /* copy the strbuf next time around */
2390 qreply(q, mp);
2391 ret = DONAME_CONT;
2392 break;
2393
2394 default:
2395 tilog("ti_doname: freeing bad message type = %d\n",
2396 mp->b_datap->db_type);
2397 freemsg(mp);
2398 ret = DONAME_FAIL;
2399 break;
2400 }
2401 return (ret);
2402 }
2403
2404
2405 /*
2406 * Fill in the address of a connectionless data packet if a connect
2407 * had been done on this endpoint.
2408 */
2409 static mblk_t *
2410 tim_filladdr(queue_t *q, mblk_t *mp, boolean_t dorecover)
2411 {
2412 mblk_t *bp;
2413 struct tim_tim *tp;
2414 struct T_unitdata_req *up;
2415 struct T_unitdata_req *nup;
2416 size_t plen;
2417
2418 tp = (struct tim_tim *)q->q_ptr;
2419 if (mp->b_datap->db_type == M_DATA) {
2420 mutex_enter(&tp->tim_mutex);
2421 bp = allocb(sizeof (struct T_unitdata_req) + tp->tim_peerlen,
2422 BPRI_MED);
2423 if (bp != NULL) {
2424 bp->b_datap->db_type = M_PROTO;
2425 up = (struct T_unitdata_req *)bp->b_rptr;
2426 up->PRIM_type = T_UNITDATA_REQ;
2427 up->DEST_length = tp->tim_peerlen;
2428 bp->b_wptr += sizeof (struct T_unitdata_req);
2429 up->DEST_offset = sizeof (struct T_unitdata_req);
2430 up->OPT_length = 0;
2431 up->OPT_offset = 0;
2432 if (tp->tim_peerlen > 0) {
2433 bcopy(tp->tim_peername, bp->b_wptr,
2434 tp->tim_peerlen);
2435 bp->b_wptr += tp->tim_peerlen;
2436 }
2437 bp->b_cont = mp;
2438 }
2439 } else {
2440 ASSERT(mp->b_datap->db_type == M_PROTO);
2441 up = (struct T_unitdata_req *)mp->b_rptr;
2442 ASSERT(up->PRIM_type == T_UNITDATA_REQ);
2443 if (up->DEST_length != 0)
2444 return (mp);
2445 mutex_enter(&tp->tim_mutex);
2446 bp = allocb(sizeof (struct T_unitdata_req) + up->OPT_length +
2447 tp->tim_peerlen, BPRI_MED);
2448 if (bp != NULL) {
2449 bp->b_datap->db_type = M_PROTO;
2450 nup = (struct T_unitdata_req *)bp->b_rptr;
2451 nup->PRIM_type = T_UNITDATA_REQ;
2452 nup->DEST_length = plen = tp->tim_peerlen;
2453 bp->b_wptr += sizeof (struct T_unitdata_req);
2454 nup->DEST_offset = sizeof (struct T_unitdata_req);
2455 if (plen > 0) {
2456 bcopy(tp->tim_peername, bp->b_wptr, plen);
2457 bp->b_wptr += plen;
2458 }
2459 mutex_exit(&tp->tim_mutex);
2460 if (up->OPT_length == 0) {
2461 nup->OPT_length = 0;
2462 nup->OPT_offset = 0;
2463 } else {
2464 nup->OPT_length = up->OPT_length;
2465 nup->OPT_offset =
2466 sizeof (struct T_unitdata_req) + plen;
2467 bcopy((mp->b_rptr + up->OPT_offset), bp->b_wptr,
2468 up->OPT_length);
2469 bp->b_wptr += up->OPT_length;
2470 }
2471 bp->b_cont = mp->b_cont;
2472 mp->b_cont = NULL;
2473 freeb(mp);
2474 return (bp);
2475 }
2476 }
2477 ASSERT(MUTEX_HELD(&tp->tim_mutex));
2478 if (bp == NULL && dorecover) {
2479 tim_recover(q, mp,
2480 sizeof (struct T_unitdata_req) + tp->tim_peerlen);
2481 }
2482 mutex_exit(&tp->tim_mutex);
2483 return (bp);
2484 }
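/*
 * The M_PROTO block built above is laid out as
 *
 *	[ struct T_unitdata_req | DEST_length bytes of tim_peername |
 *	  OPT_length bytes of options (if any) ]
 *
 * with DEST_offset pointing just past the header, OPT_offset just past the
 * address, and the caller's original data block(s) linked on via b_cont.
 */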
2485
2486 static void
2487 tim_addlink(struct tim_tim *tp)
2488 {
2489 struct tim_tim **tpp;
2490 struct tim_tim *next;
2491
2492 tpp = &tim_hash[TIM_HASH(tp->tim_acceptor)];
2493 rw_enter(&tim_list_rwlock, RW_WRITER);
2494
2495 if ((next = *tpp) != NULL)
2496 next->tim_ptpn = &tp->tim_next;
2497 tp->tim_next = next;
2498 tp->tim_ptpn = tpp;
2499 *tpp = tp;
2500
2501 tim_cnt++;
2502
2503 rw_exit(&tim_list_rwlock);
2504 }
2505
2506 static void
2507 tim_dellink(struct tim_tim *tp)
2508 {
2509 struct tim_tim *next;
2510
2511 rw_enter(&tim_list_rwlock, RW_WRITER);
2512
2513 if ((next = tp->tim_next) != NULL)
2514 next->tim_ptpn = tp->tim_ptpn;
2515 *(tp->tim_ptpn) = next;
2516
2517 tim_cnt--;
2518
2519 rw_exit(&tim_list_rwlock);
2520 }
2521
2522 static struct tim_tim *
2523 tim_findlink(t_uscalar_t id)
2524 {
2525 struct tim_tim *tp;
2526
2527 ASSERT(rw_lock_held(&tim_list_rwlock));
2528
2529 for (tp = tim_hash[TIM_HASH(id)]; tp != NULL; tp = tp->tim_next) {
2530 if (tp->tim_acceptor == id) {
2531 break;
2532 }
2533 }
2534 return (tp);
2535 }
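/*
 * A minimal sketch of the locking contract asserted above: callers hold
 * tim_list_rwlock across the lookup and any use of the returned entry,
 * where acceptor_id is just an illustrative variable name:
 *
 *	rw_enter(&tim_list_rwlock, RW_READER);
 *	tp = tim_findlink(acceptor_id);
 *	if (tp != NULL) {
 *		... use tp while the lock is still held ...
 *	}
 *	rw_exit(&tim_list_rwlock);
 */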
2536
2537 static void
2538 tim_recover(queue_t *q, mblk_t *mp, t_scalar_t size)
2539 {
2540 struct tim_tim *tp;
2541 bufcall_id_t bid;
2542 timeout_id_t tid;
2543
2544 tp = (struct tim_tim *)q->q_ptr;
2545
2546 /*
2547 * Avoid re-enabling the queue.
2548 */
2549 if (mp->b_datap->db_type == M_PCPROTO)
2550 mp->b_datap->db_type = M_PROTO;
2551 noenable(q);
2552 (void) putbq(q, mp);
2553
2554 /*
2555 * Make sure there is at most one outstanding request per queue.
2556 */
2557 if (q->q_flag & QREADR) {
2558 if (tp->tim_rtimoutid || tp->tim_rbufcid)
2559 return;
2560 } else {
2561 if (tp->tim_wtimoutid || tp->tim_wbufcid)
2562 return;
2563 }
2564 if (!(bid = qbufcall(RD(q), (size_t)size, BPRI_MED, tim_buffer, q))) {
2565 tid = qtimeout(RD(q), tim_timer, q, TIMWAIT);
2566 if (q->q_flag & QREADR)
2567 tp->tim_rtimoutid = tid;
2568 else
2569 tp->tim_wtimoutid = tid;
2570 } else {
2571 if (q->q_flag & QREADR)
2572 tp->tim_rbufcid = bid;
2573 else
2574 tp->tim_wbufcid = bid;
2575 }
2576 }
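/*
 * The qbufcall()/qtimeout() callbacks registered above (tim_buffer() and
 * tim_timer(), defined elsewhere in this module) are expected to clear the
 * saved id and re-enable the queue so that the message put back with
 * putbq() above is retried by the service routine.
 */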
2577
2578 /*
2579 * Timod is waiting on a downstream ioctl reply; come back soon
2580 * to reschedule the write-side service routine, which will check
2581 * whether the ioctl is done so another one can proceed.
2582 */
2583 static void
2584 tim_ioctl_retry(queue_t *q)
2585 {
2586 struct tim_tim *tp;
2587
2588 tp = (struct tim_tim *)q->q_ptr;
2589
2590 /*
2591 * Make sure there is at most one outstanding request per write queue.
2592 */
2593 if (tp->tim_wtimoutid || tp->tim_wbufcid)
2594 return;
2595
2596 tp->tim_wtimoutid = qtimeout(RD(q), tim_timer, q, TIMIOCWAIT);
2597 }
2598
2599 /*
2600 * Inspect the data on the read queues, starting from the read queue
2601 * passed as a parameter (the timod read queue) and traversing until
2602 * q_next is NULL (the stream head). Look for a TPI T_EXDATA_IND message;
2603 * return 1 if found, 0 if not.
2604 */
2605 static int
2606 ti_expind_on_rdqueues(queue_t *rq)
2607 {
2608 mblk_t *bp;
2609 queue_t *q;
2610
2611 q = rq;
2612 /*
2613 * We are going to walk q_next, so protect stream from plumbing
2614 * changes.
2615 */
2616 claimstr(q);
2617 do {
2618 /*
2619 * Hold QLOCK while referencing data on queues
2620 */
2621 mutex_enter(QLOCK(rq));
2622 bp = rq->q_first;
2623 while (bp != NULL) {
2624 /*
2625 * Walk the messages on the queue looking
2626 * for a possible T_EXDATA_IND
2627 */
2628 if ((bp->b_datap->db_type == M_PROTO) &&
2629 ((bp->b_wptr - bp->b_rptr) >=
2630 sizeof (struct T_exdata_ind)) &&
2631 (((struct T_exdata_ind *)bp->b_rptr)->PRIM_type
2632 == T_EXDATA_IND)) {
2633 /* bp is T_EXDATA_IND */
2634 mutex_exit(QLOCK(rq));
2635 releasestr(q); /* decrement sd_refcnt */
2636 return (1); /* expdata is on a read queue */
2637 }
2638 bp = bp->b_next; /* next message */
2639 }
2640 mutex_exit(QLOCK(rq));
2641 rq = rq->q_next; /* next upstream queue */
2642 } while (rq != NULL);
2643 releasestr(q);
2644 return (0); /* no expdata on read queues */
2645 }
2646
2647 static void
2648 tim_tcap_timer(void *q_ptr)
2649 {
2650 queue_t *q = (queue_t *)q_ptr;
2651 struct tim_tim *tp = (struct tim_tim *)q->q_ptr;
2652
2653 ASSERT(tp != NULL && tp->tim_tcap_timoutid != 0);
2654 ASSERT((tp->tim_flags & TI_CAP_RECVD) != 0);
2655
2656 tp->tim_tcap_timoutid = 0;
2657 TILOG("tim_tcap_timer: fired\n", 0);
2658 tim_tcap_genreply(q, tp);
2659 }
2660
2661 /*
2662 * tim_tcap_genreply() is called either from the timeout routine or when
2663 * a T_ERROR_ACK is received. In both cases it means that the underlying
2664 * transport doesn't provide T_CAPABILITY_REQ.
2665 */
2666 static void
2667 tim_tcap_genreply(queue_t *q, struct tim_tim *tp)
2668 {
2669 mblk_t *mp = tp->tim_iocsave;
2670 struct iocblk *iocbp;
2671
2672 TILOG("timodrproc: tim_tcap_genreply\n", 0);
2673
2674 ASSERT(tp == (struct tim_tim *)q->q_ptr);
2675 ASSERT(mp != NULL);
2676
2677 iocbp = (struct iocblk *)mp->b_rptr;
2678 ASSERT(iocbp != NULL);
2679 ASSERT(MBLKL(mp) == sizeof (struct iocblk));
2680 ASSERT(iocbp->ioc_cmd == TI_CAPABILITY);
2681 ASSERT(mp->b_cont == NULL);
2682
2683 /* Save this information permanently in the module */
2684 PI_PROVLOCK(tp->tim_provinfo);
2685 if (tp->tim_provinfo->tpi_capability == PI_DONTKNOW)
2686 tp->tim_provinfo->tpi_capability = PI_NO;
2687 PI_PROVUNLOCK(tp->tim_provinfo);
2688
2689 if (tp->tim_tcap_timoutid != 0) {
2690 (void) quntimeout(q, tp->tim_tcap_timoutid);
2691 tp->tim_tcap_timoutid = 0;
2692 }
2693
2694 if ((tp->tim_flags & CAP_WANTS_INFO) != 0) {
2695 /* Send T_INFO_REQ down */
2696 mblk_t *tirmp = tpi_ack_alloc(NULL,
2697 sizeof (struct T_info_req), M_PCPROTO, T_INFO_REQ);
2698
2699 if (tirmp != NULL) {
2700 /* Emulate TC1_INFO */
2701 TILOG("emulate_tcap_ioc_req: sending T_INFO_REQ\n", 0);
2702 tp->tim_flags |= WAIT_IOCINFOACK;
2703 putnext(WR(q), tirmp);
2704 } else {
2705 tilog("emulate_tcap_req: allocb fail, "
2706 "no recovery attempt\n", 0);
2707 tp->tim_iocsave = NULL;
2708 tp->tim_saved_prim = -1;
2709 tp->tim_flags &= ~(TI_CAP_RECVD | WAITIOCACK |
2710 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2711 miocnak(q, mp, 0, ENOMEM);
2712 }
2713 } else {
2714 /* Reply immediately */
2715 mblk_t *ackmp = tpi_ack_alloc(NULL,
2716 sizeof (struct T_capability_ack), M_PCPROTO,
2717 T_CAPABILITY_ACK);
2718
2719 mp->b_cont = ackmp;
2720
2721 if (ackmp != NULL) {
2722 ((struct T_capability_ack *)
2723 ackmp->b_rptr)->CAP_bits1 = 0;
2724 tim_ioctl_send_reply(q, mp, ackmp);
2725 tp->tim_iocsave = NULL;
2726 tp->tim_saved_prim = -1;
2727 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2728 TI_CAP_RECVD | CAP_WANTS_INFO);
2729 } else {
2730 tilog("timodwproc: allocb failed, no "
2731 "recovery attempt\n", 0);
2732 tp->tim_iocsave = NULL;
2733 tp->tim_saved_prim = -1;
2734 tp->tim_flags &= ~(TI_CAP_RECVD | WAITIOCACK |
2735 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2736 miocnak(q, mp, 0, ENOMEM);
2737 }
2738 }
2739 }
2740
2741
2742 static void
2743 tim_ioctl_send_reply(queue_t *q, mblk_t *ioc_mp, mblk_t *mp)
2744 {
2745 struct iocblk *iocbp;
2746
2747 ASSERT(q != NULL && ioc_mp != NULL);
2748
2749 ioc_mp->b_datap->db_type = M_IOCACK;
2750 if (mp != NULL)
2751 mp->b_datap->db_type = M_DATA;
2752
2753 if (ioc_mp->b_cont != mp) {
2754 /* It is safe to call freemsg for NULL pointers */
2755 freemsg(ioc_mp->b_cont);
2756 ioc_mp->b_cont = mp;
2757 }
2758 iocbp = (struct iocblk *)ioc_mp->b_rptr;
2759 iocbp->ioc_error = 0;
2760 iocbp->ioc_rval = 0;
2761 /*
2762 * All ioctls may return more data than was specified by the
2763 * count arg. For TI_CAPABILITY, count is treated as the maximum data size.
2764 */
2765 if (mp == NULL)
2766 iocbp->ioc_count = 0;
2767 else if (iocbp->ioc_cmd != TI_CAPABILITY)
2768 iocbp->ioc_count = msgsize(mp);
2769 else {
2770 iocbp->ioc_count = MIN(MBLKL(mp), iocbp->ioc_count);
2771 /* Truncate message if too large */
2772 mp->b_wptr = mp->b_rptr + iocbp->ioc_count;
2773 }
2774
2775 TILOG("iosendreply: ioc_cmd = %d, ", iocbp->ioc_cmd);
2776 putnext(RD(q), ioc_mp);
2777 }
2778
2779 /*
2780 * Send M_IOCACK for errors.
2781 */
2782 static void
2783 tim_send_ioc_error_ack(queue_t *q, struct tim_tim *tp, mblk_t *mp)
2784 {
2785 struct T_error_ack *tea = (struct T_error_ack *)mp->b_rptr;
2786 t_scalar_t error_prim;
2787
2788 mp->b_wptr = mp->b_rptr + sizeof (struct T_error_ack);
2789 ASSERT(mp->b_wptr <= mp->b_datap->db_lim);
2790 error_prim = tea->ERROR_prim;
2791
2792 ASSERT(tp->tim_iocsave != NULL);
2793 ASSERT(tp->tim_iocsave->b_cont != mp);
2794
2795 /* Always send this to the read side of the queue */
2796 q = RD(q);
2797
2798 TILOG("tim_send_ioc_error_ack: prim = %d\n", tp->tim_saved_prim);
2799
2800 if (tp->tim_saved_prim != error_prim) {
2801 putnext(q, mp);
2802 } else if (error_prim == T_CAPABILITY_REQ) {
2803 TILOG("timodrproc: T_ERROR_ACK/T_CAPABILITY_REQ\n", 0);
2804 ASSERT(tp->tim_iocsave->b_cont == NULL);
2805
2806 tim_tcap_genreply(q, tp);
2807 freemsg(mp);
2808 } else {
2809 struct iocblk *iocbp = (struct iocblk *)tp->tim_iocsave->b_rptr;
2810
2811 TILOG("tim_send_ioc_error_ack: T_ERROR_ACK: prim %d\n",
2812 error_prim);
2813 ASSERT(tp->tim_iocsave->b_cont == NULL);
2814
2815 switch (error_prim) {
2816 default:
2817 TILOG("timodrproc: Unknown T_ERROR_ACK: tlierror %d\n",
2818 tea->TLI_error);
2819
2820 putnext(q, mp);
2821 break;
2822
2823 case T_INFO_REQ:
2824 case T_SVR4_OPTMGMT_REQ:
2825 case T_OPTMGMT_REQ:
2826 case O_T_BIND_REQ:
2827 case T_BIND_REQ:
2828 case T_UNBIND_REQ:
2829 case T_ADDR_REQ:
2830 case T_CAPABILITY_REQ:
2831
2832 TILOG("ioc_err_ack: T_ERROR_ACK: tlierror %x\n",
2833 tea->TLI_error);
2834
2835 /* get saved ioctl msg and set values */
2836 iocbp->ioc_count = 0;
2837 iocbp->ioc_error = 0;
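/*
 * Return the TLI error to the waiting ioctl through ioc_rval; for TSYSERR
 * the UNIX errno is packed into the next higher byte so the caller can
 * recover both values.
 */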
2838 iocbp->ioc_rval = tea->TLI_error;
2839 if (iocbp->ioc_rval == TSYSERR)
2840 iocbp->ioc_rval |= tea->UNIX_error << 8;
2841 tp->tim_iocsave->b_datap->db_type = M_IOCACK;
2842 freemsg(mp);
2843 putnext(q, tp->tim_iocsave);
2844 tp->tim_iocsave = NULL;
2845 tp->tim_saved_prim = -1;
2846 tp->tim_flags &= ~(WAITIOCACK | TI_CAP_RECVD |
2847 CAP_WANTS_INFO | WAIT_IOCINFOACK);
2848 break;
2849 }
2850 }
2851 }
2852
2853 /*
2854 * Send a reply to an ordinary message or an ioctl message upstream.
2855 * Should be called from the read side only.
2856 */
2857 static void
2858 tim_send_reply(queue_t *q, mblk_t *mp, struct tim_tim *tp, t_scalar_t prim)
2859 {
2860 ASSERT(mp != NULL && q != NULL && tp != NULL);
2861 ASSERT(q == RD(q));
2862
2863 /* Restore db_type - recover() might have changed it */
2864 mp->b_datap->db_type = M_PCPROTO;
2865
2866 if (((tp->tim_flags & WAITIOCACK) == 0) || (tp->tim_saved_prim != prim))
2867 putnext(q, mp);
2868 else {
2869 ASSERT(tp->tim_iocsave != NULL);
2870 tim_ioctl_send_reply(q, tp->tim_iocsave, mp);
2871 tp->tim_iocsave = NULL;
2872 tp->tim_saved_prim = -1;
2873 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2874 TI_CAP_RECVD | CAP_WANTS_INFO);
2875 }
2876 }
2877
2878 /*
2879 * Reply to a TI_SYNC request without sending anything downstream.
2880 */
2881 static void
2882 tim_answer_ti_sync(queue_t *q, mblk_t *mp, struct tim_tim *tp,
2883 mblk_t *ackmp, uint32_t tsr_flags)
2884 {
2885 struct ti_sync_ack *tsap;
2886
2887 ASSERT(q != NULL && q == WR(q) && ackmp != NULL);
2888
2889 tsap = (struct ti_sync_ack *)ackmp->b_rptr;
2890 bzero(tsap, sizeof (struct ti_sync_ack));
2891 ackmp->b_wptr = ackmp->b_rptr + sizeof (struct ti_sync_ack);
2892
2893 if (tsr_flags == 0 ||
2894 (tsr_flags & ~(TSRF_QLEN_REQ | TSRF_IS_EXP_IN_RCVBUF)) != 0) {
2895 /*
2896 * unsupported/bad flag setting
2897 * or no flag set.
2898 */
2899 TILOG("timodwproc: unsupported/bad flag setting %x\n",
2900 tsr_flags);
2901 freemsg(ackmp);
2902 miocnak(q, mp, 0, EINVAL);
2903 return;
2904 }
2905
2906 if ((tsr_flags & TSRF_QLEN_REQ) != 0)
2907 tsap->tsa_qlen = tp->tim_backlog;
2908
2909 if ((tsr_flags & TSRF_IS_EXP_IN_RCVBUF) != 0 &&
2910 ti_expind_on_rdqueues(RD(q))) {
2911 /*
2912 * Expedited data is queued on
2913 * the stream read side
2914 */
2915 tsap->tsa_flags |= TSAF_EXP_QUEUED;
2916 }
2917
2918 tim_ioctl_send_reply(q, mp, ackmp);
2919 tp->tim_iocsave = NULL;
2920 tp->tim_saved_prim = -1;
2921 tp->tim_flags &= ~(WAITIOCACK | WAIT_IOCINFOACK |
2922 TI_CAP_RECVD | CAP_WANTS_INFO);
2923 }
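/*
 * Illustrative, hypothetical user-level sketch (not part of this module) of
 * issuing the TI_SYNC ioctl answered above as an I_STR ioctl, assuming
 * <stropts.h> and <sys/timod.h> are included and timod is pushed on the
 * stream referenced by fd; the request and acknowledgement share one buffer,
 * and backlog/expedited are stand-in variables for the caller's use of the
 * reply:
 *
 *	struct strioctl ic;
 *	union {
 *		struct ti_sync_req req;
 *		struct ti_sync_ack ack;
 *	} buf;
 *	int backlog;
 *	int expedited;
 *
 *	buf.req.tsr_flags = TSRF_QLEN_REQ | TSRF_IS_EXP_IN_RCVBUF;
 *	ic.ic_cmd = TI_SYNC;
 *	ic.ic_timout = -1;
 *	ic.ic_len = sizeof (buf);
 *	ic.ic_dp = (char *)&buf;
 *	if (ioctl(fd, I_STR, &ic) == 0) {
 *		backlog = buf.ack.tsa_qlen;
 *		expedited = (buf.ack.tsa_flags & TSAF_EXP_QUEUED) != 0;
 *	}
 */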
2924
2925 /*
2926 * Send a TPI message from an ioctl message, saving the original ioctl header
2927 * and TPI message type. Should be called from the write side only.
2928 */
2929 static void
2930 tim_send_ioctl_tpi_msg(queue_t *q, mblk_t *mp, struct tim_tim *tp,
2931 struct iocblk *iocb)
2932 {
2933 mblk_t *tmp;
2934 int ioc_cmd = iocb->ioc_cmd;
2935
2936 ASSERT(q != NULL && mp != NULL && tp != NULL);
2937 ASSERT(q == WR(q));
2938 ASSERT(mp->b_cont != NULL);
2939
2940 tp->tim_iocsave = mp;
2941 tmp = mp->b_cont;
2942
2943 mp->b_cont = NULL;
2944 tp->tim_flags |= WAITIOCACK;
2945 tp->tim_saved_prim = ((union T_primitives *)tmp->b_rptr)->type;
2946
2947 /*
2948 * For TI_GETINFO, the attached message is a T_INFO_REQ
2949 * For TI_SYNC, we generate the T_INFO_REQ message above
2950 * For TI_CAPABILITY the attached message is either
2951 * T_CAPABILITY_REQ or T_INFO_REQ.
2952 * Among the possible TPI request messages,
2953 * T_INFO_REQ/T_CAPABILITY_REQ messages are M_PCPROTO; the rest
2954 * are M_PROTO.
2955 */
2956 if (ioc_cmd == TI_GETINFO || ioc_cmd == TI_SYNC ||
2957 ioc_cmd == TI_CAPABILITY) {
2958 tmp->b_datap->db_type = M_PCPROTO;
2959 } else {
2960 tmp->b_datap->db_type = M_PROTO;
2961 }
2962
2963 /* Verify credentials in STREAM */
2964 ASSERT(iocb->ioc_cr == NULL || iocb->ioc_cr == DB_CRED(tmp));
2965
2966 ASSERT(DB_CRED(tmp) != NULL);
2967
2968 TILOG("timodwproc: sending down %d\n", tp->tim_saved_prim);
2969 putnext(q, tmp);
2970 }
2971
2972 static void
2973 tim_clear_peer(struct tim_tim *tp)
2974 {
2975 mutex_enter(&tp->tim_mutex);
2976 if (tp->tim_peercred != NULL) {
2977 crfree(tp->tim_peercred);
2978 tp->tim_peercred = NULL;
2979 }
2980 tp->tim_peerlen = 0;
2981 mutex_exit(&tp->tim_mutex);
2982 }
2983