17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*a26eed27Sgt29601 * Common Development and Distribution License (the "License"). 6*a26eed27Sgt29601 * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22*a26eed27Sgt29601 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate /* Copyright (c) 1990 Mentat Inc. 
*/ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 287c478bd9Sstevel@tonic-gate /* All Rights Reserved */ 297c478bd9Sstevel@tonic-gate 307c478bd9Sstevel@tonic-gate 317c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 327c478bd9Sstevel@tonic-gate 337c478bd9Sstevel@tonic-gate /* 347c478bd9Sstevel@tonic-gate * Kernel RPC filtering module 357c478bd9Sstevel@tonic-gate */ 367c478bd9Sstevel@tonic-gate 377c478bd9Sstevel@tonic-gate #include <sys/param.h> 387c478bd9Sstevel@tonic-gate #include <sys/types.h> 397c478bd9Sstevel@tonic-gate #include <sys/stream.h> 407c478bd9Sstevel@tonic-gate #include <sys/stropts.h> 417c478bd9Sstevel@tonic-gate #include <sys/tihdr.h> 427c478bd9Sstevel@tonic-gate #include <sys/timod.h> 437c478bd9Sstevel@tonic-gate #include <sys/tiuser.h> 447c478bd9Sstevel@tonic-gate #include <sys/debug.h> 457c478bd9Sstevel@tonic-gate #include <sys/signal.h> 467c478bd9Sstevel@tonic-gate #include <sys/pcb.h> 477c478bd9Sstevel@tonic-gate #include <sys/user.h> 487c478bd9Sstevel@tonic-gate #include <sys/errno.h> 497c478bd9Sstevel@tonic-gate #include <sys/cred.h> 507c478bd9Sstevel@tonic-gate #include <sys/policy.h> 517c478bd9Sstevel@tonic-gate #include <sys/inline.h> 527c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 537c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 547c478bd9Sstevel@tonic-gate #include <sys/file.h> 557c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 567c478bd9Sstevel@tonic-gate #include <sys/systm.h> 577c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 587c478bd9Sstevel@tonic-gate #include <sys/ddi.h> 597c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 607c478bd9Sstevel@tonic-gate #include <sys/callb.h> 617c478bd9Sstevel@tonic-gate 627c478bd9Sstevel@tonic-gate #include <sys/strlog.h> 637c478bd9Sstevel@tonic-gate #include <rpc/rpc_com.h> 647c478bd9Sstevel@tonic-gate #include <inet/common.h> 657c478bd9Sstevel@tonic-gate #include <rpc/types.h> 667c478bd9Sstevel@tonic-gate 
#include <sys/time.h> 677c478bd9Sstevel@tonic-gate #include <rpc/xdr.h> 687c478bd9Sstevel@tonic-gate #include <rpc/auth.h> 697c478bd9Sstevel@tonic-gate #include <rpc/clnt.h> 707c478bd9Sstevel@tonic-gate #include <rpc/rpc_msg.h> 717c478bd9Sstevel@tonic-gate #include <rpc/clnt.h> 727c478bd9Sstevel@tonic-gate #include <rpc/svc.h> 737c478bd9Sstevel@tonic-gate #include <rpc/rpcsys.h> 747c478bd9Sstevel@tonic-gate #include <rpc/rpc_rdma.h> 757c478bd9Sstevel@tonic-gate 767c478bd9Sstevel@tonic-gate /* 777c478bd9Sstevel@tonic-gate * This is the loadable module wrapper. 787c478bd9Sstevel@tonic-gate */ 797c478bd9Sstevel@tonic-gate #include <sys/conf.h> 807c478bd9Sstevel@tonic-gate #include <sys/modctl.h> 817c478bd9Sstevel@tonic-gate #include <sys/syscall.h> 827c478bd9Sstevel@tonic-gate 837c478bd9Sstevel@tonic-gate extern struct streamtab rpcinfo; 847c478bd9Sstevel@tonic-gate 857c478bd9Sstevel@tonic-gate static struct fmodsw fsw = { 867c478bd9Sstevel@tonic-gate "rpcmod", 877c478bd9Sstevel@tonic-gate &rpcinfo, 887c478bd9Sstevel@tonic-gate D_NEW|D_MP, 897c478bd9Sstevel@tonic-gate }; 907c478bd9Sstevel@tonic-gate 917c478bd9Sstevel@tonic-gate /* 927c478bd9Sstevel@tonic-gate * Module linkage information for the kernel. 937c478bd9Sstevel@tonic-gate */ 947c478bd9Sstevel@tonic-gate 957c478bd9Sstevel@tonic-gate static struct modlstrmod modlstrmod = { 967c478bd9Sstevel@tonic-gate &mod_strmodops, "rpc interface str mod", &fsw 977c478bd9Sstevel@tonic-gate }; 987c478bd9Sstevel@tonic-gate 997c478bd9Sstevel@tonic-gate /* 1007c478bd9Sstevel@tonic-gate * For the RPC system call. 
 */
static struct sysent rpcsysent = {
	2,			/* number of syscall arguments */
	SE_32RVAL1 | SE_ARGC | SE_NOUNLOAD,
	rpcsys			/* handler for the rpcsys(2) system call */
};

static struct modlsys modlsys = {
	&mod_syscallops,
	"RPC syscall",
	&rpcsysent
};

#ifdef _SYSCALL32_IMPL
/* 32-bit syscall entry, sharing the same sysent as the native one. */
static struct modlsys modlsys32 = {
	&mod_syscallops32,
	"32-bit RPC syscall",
	&rpcsysent
};
#endif /* _SYSCALL32_IMPL */

/*
 * One module, three linkage records: the syscall (twice when 32-bit
 * compat is built) and the STREAMS module itself.
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	{
		&modlsys,
#ifdef _SYSCALL32_IMPL
		&modlsys32,
#endif
		&modlstrmod,
		NULL
	}
};

/*
 * Module load entry point.  Initializes the kRPC client and server
 * frameworks, registers a CPR (suspend/resume) callback, installs the
 * module, and then performs post-install setup (RDMA locks, kstats,
 * LDI identity).  Returns 0 on success or the mod_install() error.
 */
int
_init(void)
{
	int error = 0;
	callb_id_t cid;
	int status;

	svc_init();
	clnt_init();
	cid = callb_add(connmgr_cpr_reset, 0, CB_CL_CPR_RPC, "rpc");

	/* Intentional assignment-in-condition: error receives the result. */
	if (error = mod_install(&modlinkage)) {
		/*
		 * Could not install module, cleanup previous
		 * initialization work.
		 */
		clnt_fini();
		if (cid != NULL)
			(void) callb_delete(cid);

		/*
		 * NOTE(review): svc_init() work is not unwound here —
		 * confirm whether that is intentional or has no
		 * corresponding teardown.
		 */
		return (error);
	}

	/*
	 * Load up the RDMA plugins and initialize the stats. Even if the
	 * plugins loadup fails, but rpcmod was successfully installed the
	 * counters still get initialized.
	 */
	rw_init(&rdma_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rdma_modload_lock, NULL, MUTEX_DEFAULT, NULL);
	mt_kstat_init();

	/*
	 * Get our identification into ldi. This is used for loading
	 * other modules, e.g. rpcib.
	 */
	status = ldi_ident_from_mod(&modlinkage, &rpcmod_li);
	if (status != 0) {
		/* Non-fatal: module stays loaded, but rpcib loading fails. */
		cmn_err(CE_WARN, "ldi_ident_from_mod fails with %d", status);
		rpcmod_li = NULL;
	}

	return (error);
}

/*
 * The unload entry point fails, because we advertise entry points into
 * rpcmod from the rest of kRPC: rpcmod_release().
 */
int
_fini(void)
{
	/* Never unloadable; see the comment above. */
	return (EBUSY);
}

/*
 * Report module information via the standard modctl mechanism.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

extern int nulldev();

/* STREAMS module id for rpcmod (see module_info below). */
#define	RPCMOD_ID	2049

int rmm_open(), rmm_close();

/*
 * To save instructions, since STREAMS ignores the return value
 * from these functions, they are defined as void here. Kind of icky, but...
 */
void rmm_rput(queue_t *, mblk_t *);
void rmm_wput(queue_t *, mblk_t *);
void rmm_rsrv(queue_t *);
void rmm_wsrv(queue_t *);

/* CLTS (datagram) entry points, dispatched via xprt_clts_ops. */
int rpcmodopen(), rpcmodclose();
void rpcmodrput(), rpcmodwput();
void rpcmodrsrv(), rpcmodwsrv();

/* COTS (connection-oriented) entry points, dispatched via xprt_cots_ops. */
static void rpcmodwput_other(queue_t *, mblk_t *);
static int mir_close(queue_t *q);
static int mir_open(queue_t *q, dev_t *devp, int flag, int sflag,
		cred_t *credp);
static void mir_rput(queue_t *q, mblk_t *mp);
static void mir_rsrv(queue_t *q);
static void
mir_wput(queue_t *q, mblk_t *mp);
static void mir_wsrv(queue_t *q);

/* id, name, min/max packet size, hi/lo water marks */
static struct module_info rpcmod_info =
	{RPCMOD_ID, "rpcmod", 0, INFPSZ, 256*1024, 1024};

/*
 * Read side has no service procedure.
 * NOTE(review): the statement above looks stale — rmm_rsrv IS installed
 * as the read service procedure below; confirm and reconcile.
 */
static struct qinit rpcmodrinit = {
	(int (*)())rmm_rput,
	(int (*)())rmm_rsrv,
	rmm_open,
	rmm_close,
	nulldev,
	&rpcmod_info,
	NULL
};

/*
 * The write put procedure is simply putnext to conserve stack space.
 * The write service procedure is not used to queue data, but instead to
 * synchronize with flow control.
 */
static struct qinit rpcmodwinit = {
	(int (*)())rmm_wput,
	(int (*)())rmm_wsrv,
	rmm_open,
	rmm_close,
	nulldev,
	&rpcmod_info,
	NULL
};
struct streamtab rpcinfo = { &rpcmodrinit, &rpcmodwinit, NULL, NULL };

/*
 * Per-transport-style dispatch table.  The rmm_* entry points look up
 * the table hung off q->q_ptr and forward to the CLTS or COTS
 * implementation chosen at open time.
 */
struct xprt_style_ops {
	int (*xo_open)();
	int (*xo_close)();
	void (*xo_wput)();
	void (*xo_wsrv)();
	void (*xo_rput)();
	void (*xo_rsrv)();
};

/* Connectionless (datagram) transports: the rpcmod* routines. */
static struct xprt_style_ops xprt_clts_ops = {
	rpcmodopen,
	rpcmodclose,
	rpcmodwput,
	rpcmodwsrv,
	rpcmodrput,
	NULL
};

/* Connection-oriented transports: the mir_* routines. */
static struct xprt_style_ops xprt_cots_ops = {
	mir_open,
	mir_close,
	mir_wput,
	mir_wsrv,
	mir_rput,
	mir_rsrv
};

/*
 * Per rpcmod "slot" data structure. q->q_ptr points to one of these.
 */
struct rpcm {
	void	*rm_krpc_cell;	/* Reserved for use by KRPC */
	struct	xprt_style_ops	*rm_ops;
	int	rm_type;	/* Client or server side stream */
#define	RM_CLOSING	0x1	/* somebody is trying to close slot */
	uint_t	rm_state;	/* state of the slot. see above */
	uint_t	rm_ref;		/* cnt of external references to slot */
	kmutex_t	rm_lock;	/* mutex protecting above fields */
	kcondvar_t	rm_cwait;	/* condition for closing */
	zoneid_t	rm_zoneid;	/* zone which pushed rpcmod */
};

/*
 * Transient slot used during rmm_open() while we wait for the
 * T_INFO_ACK that tells us whether the transport is CLTS or COTS.
 * Lives on the opener's stack; see rmm_open().
 */
struct temp_slot {
	void *cell;
	struct xprt_style_ops *ops;
	int type;
	mblk_t *info_ack;	/* T_INFO_ACK captured by tmp_rput() */
	kmutex_t lock;		/* protects info_ack */
	kcondvar_t wait;	/* signalled when info_ack arrives */
};

void tmp_rput(queue_t *q, mblk_t *mp);

/*
 * Minimal ops used only while the open is in progress: pass writes
 * straight through, capture reads in tmp_rput().
 */
struct xprt_style_ops tmpops = {
	NULL,
	NULL,
	putnext,
	NULL,
	tmp_rput,
	NULL
};

/*
 * Read-side put procedure used during open: stash the T_INFO_ACK for
 * the thread blocked in rmm_open() and discard everything else.
 */
void
tmp_rput(queue_t *q, mblk_t *mp)
{
	struct temp_slot *t = (struct temp_slot *)(q->q_ptr);
	struct T_info_ack *pptr;

	switch (mp->b_datap->db_type) {
	case M_PCPROTO:
		pptr = (struct T_info_ack *)mp->b_rptr;
		switch (pptr->PRIM_type) {
		case T_INFO_ACK:
			/* Hand the message to the waiting opener. */
			mutex_enter(&t->lock);
			t->info_ack = mp;
			cv_signal(&t->wait);
			mutex_exit(&t->lock);
			return;
		default:
			break;
		}
		/* FALLTHROUGH: non-T_INFO_ACK M_PCPROTO is dropped below */
	default:
		break;
	}

	/*
	 * Not an info-ack, so free it. This is ok because we should
	 * not be receiving data until the open finishes: rpcmod
	 * is pushed well before the end-point is bound to an address.
	 */
	freemsg(mp);
}

/*
 * Common open routine for both qinit tables.  Sends a T_INFO_REQ
 * downstream, waits (interruptibly) for the T_INFO_ACK, and then
 * dispatches to rpcmodopen() (CLTS) or mir_open() (COTS) based on the
 * reported service type.
 */
int
rmm_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *crp)
{
	mblk_t *bp;
	struct temp_slot ts, *t;
	struct T_info_ack *pptr;
	int error = 0;
	int procson = 0;	/* set once qprocson() has been called */

	ASSERT(q != NULL);
	/*
	 * Check for re-opens.
	 */
	if (q->q_ptr) {
		TRACE_1(TR_FAC_KRPC, TR_RPCMODOPEN_END,
		    "rpcmodopen_end:(%s)", "q->qptr");
		return (0);
	}

	/* Stack-resident temp slot; replaced by the real slot on success. */
	t = &ts;
	bzero(t, sizeof (*t));
	q->q_ptr = (void *)t;
	/* WR(q)->q_ptr = (void *)t; */

	/*
	 * Allocate the required messages upfront.
	 */
	if ((bp = allocb(sizeof (struct T_info_req) +
	    sizeof (struct T_info_ack), BPRI_LO)) == (mblk_t *)NULL) {
		return (ENOBUFS);
	}

	mutex_init(&t->lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&t->wait, NULL, CV_DEFAULT, NULL);

	t->ops = &tmpops;

	/* Enable put/srv procedures so tmp_rput() can see the ack. */
	qprocson(q);
	procson = 1;
	bp->b_datap->db_type = M_PCPROTO;
	*(int32_t *)bp->b_wptr = (int32_t)T_INFO_REQ;
	bp->b_wptr += sizeof (struct T_info_req);
	putnext(WR(q), bp);

	/*
	 * Wait for tmp_rput() to deliver the T_INFO_ACK; a signal
	 * aborts the open with EINTR.
	 */
	mutex_enter(&t->lock);
	while ((bp = t->info_ack) == NULL) {
		if (cv_wait_sig(&t->wait, &t->lock) == 0) {
			error = EINTR;
			break;
		}
	}
	mutex_exit(&t->lock);
	/* Single-threaded from here on; safe to tear down the sync objects. */
	mutex_destroy(&t->lock);
	cv_destroy(&t->wait);
	if (error)
		goto out;

	pptr = (struct T_info_ack *)t->info_ack->b_rptr;

	/*
	 * Dispatch on the transport service type; the real open replaces
	 * q->q_ptr with its own slot, and we then install the matching
	 * dispatch table.
	 */
	if (pptr->SERV_type == T_CLTS) {
		error = rpcmodopen(q, devp, flag, sflag, crp);
		if (error == 0) {
			t = (struct temp_slot *)q->q_ptr;
			t->ops = &xprt_clts_ops;
		}
	} else {
		error = mir_open(q, devp, flag, sflag, crp);
		if (error == 0) {
			t = (struct temp_slot *)q->q_ptr;
			t->ops = &xprt_cots_ops;
		}
	}

out:
	/*
	 * On the EINTR path bp is NULL and freemsg(NULL) is a no-op.
	 * NOTE(review): on error, q->q_ptr is left pointing at the
	 * stack-resident ts, and an info_ack arriving after EINTR is
	 * not freed here — confirm close/teardown handles both.
	 */
	freemsg(bp);

	if (error && procson)
		qprocsoff(q);

	return (error);
}

/* Read put: forward through the per-slot dispatch table. */
void
rmm_rput(queue_t *q, mblk_t *mp)
{
	(*((struct temp_slot *)q->q_ptr)->ops->xo_rput)(q, mp);
}

/* Read service: forward through the per-slot dispatch table. */
void
rmm_rsrv(queue_t *q)
{
	(*((struct temp_slot *)q->q_ptr)->ops->xo_rsrv)(q);
}

void
rmm_wput(queue_t *q, mblk_t *mp)
{
	/* Write put: forward through the per-slot dispatch table. */
	(*((struct temp_slot *)q->q_ptr)->ops->xo_wput)(q, mp);
}

/* Write service: forward through the per-slot dispatch table. */
void
rmm_wsrv(queue_t *q)
{
	(*((struct temp_slot *)q->q_ptr)->ops->xo_wsrv)(q);
}

/* Close: forward to the CLTS or COTS close chosen at open time. */
int
rmm_close(queue_t *q, int flag, cred_t *crp)
{
	return ((*((struct temp_slot *)q->q_ptr)->ops->xo_close)(q, flag, crp));
}

/*
 * rpcmodopen - open routine gets called when the module gets pushed
 * onto the stream.
 */
/*ARGSUSED*/
int
rpcmodopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *crp)
{
	struct rpcm *rmp;

	extern void (*rpc_rele)(queue_t *, mblk_t *);
	static void rpcmod_release(queue_t *, mblk_t *);

	TRACE_0(TR_FAC_KRPC, TR_RPCMODOPEN_START, "rpcmodopen_start:");

	/*
	 * Initialize entry points to release a rpcmod slot (and an input
	 * message if supplied) and to send an output message to the module
	 * below rpcmod.
 */
/*ARGSUSED*/
int
rpcmodclose(queue_t *q, int flag, cred_t *crp)
{
	struct rpcm *rmp;

	ASSERT(q != NULL);
	rmp = (struct rpcm *)q->q_ptr;

	/*
	 * Mark our state as closing.
	 */
	mutex_enter(&rmp->rm_lock);
	rmp->rm_state |= RM_CLOSING;

	/*
	 * Check and see if there are any messages on the queue. If so, send
	 * the messages, regardless whether the downstream module is ready to
	 * accept data.
	 */
	if (rmp->rm_type == RPC_SERVER) {
		flushq(q, FLUSHDATA);

		qenable(WR(q));

		if (rmp->rm_ref) {
			/* Drop the lock across the call into SVC. */
			mutex_exit(&rmp->rm_lock);
			/*
			 * call into SVC to clean the queue
			 */
			svc_queueclean(q);
			mutex_enter(&rmp->rm_lock);

			/*
			 * Block while there are kRPC threads with a reference
			 * to this message.
			 */
			while (rmp->rm_ref)
				cv_wait(&rmp->rm_cwait, &rmp->rm_lock);
		}

		mutex_exit(&rmp->rm_lock);

		/*
		 * It is now safe to remove this queue from the stream. No kRPC
		 * threads have a reference to the stream, and none ever will,
		 * because RM_CLOSING is set.
		 */
		qprocsoff(q);

		/* Notify kRPC that this stream is going away. */
		svc_queueclose(q);
	} else {
		mutex_exit(&rmp->rm_lock);
		qprocsoff(q);
	}

	/* Detach and destroy the slot; no references remain (see above). */
	q->q_ptr = NULL;
	WR(q)->q_ptr = NULL;
	mutex_destroy(&rmp->rm_lock);
	cv_destroy(&rmp->rm_cwait);
	kmem_free(rmp, sizeof (*rmp));
	return (0);
}

#ifdef DEBUG
/* Fault-injection knobs for exercising the DEBUG paths in rpcmodrput(). */
int rpcmod_send_msg_up = 0;
int rpcmod_send_uderr = 0;
int rpcmod_send_dup = 0;
int rpcmod_send_dup_cnt = 0;
#endif

/*
 * rpcmodrput - Module read put procedure. This is called from
 * the module, driver, or stream head downstream.
 */
/*
 * rpcmodrput - read-side put procedure for the CLTS (datagram) half of
 * rpcmod.  Routes incoming TPI messages: T_UNITDATA_IND data is handed to
 * kRPC (client dispatch or server request queue), T_UDERROR_IND is logged
 * and dropped, and everything else is passed upstream unchanged.
 */
void
rpcmodrput(queue_t *q, mblk_t *mp)
{
	struct rpcm *rmp;
	union T_primitives *pptr;
	int hdrsz;

	TRACE_0(TR_FAC_KRPC, TR_RPCMODRPUT_START, "rpcmodrput_start:");

	ASSERT(q != NULL);
	rmp = (struct rpcm *)q->q_ptr;

	/*
	 * If the stream has not yet been typed by the RPC_CLIENT/RPC_SERVER
	 * ioctl (rm_type == 0), nobody is interested in this message.
	 */
	if (rmp->rm_type == 0) {
		freemsg(mp);
		return;
	}

#ifdef DEBUG
	/*
	 * Debug fault injection: optionally duplicate inbound messages
	 * upstream, or synthesize a T_UDERROR_IND from a T_UNITDATA_IND,
	 * to exercise upper-layer error paths.
	 */
	if (rpcmod_send_msg_up > 0) {
		mblk_t *nmp = copymsg(mp);
		if (nmp) {
			putnext(q, nmp);
			rpcmod_send_msg_up--;
		}
	}
	if ((rpcmod_send_uderr > 0) && mp->b_datap->db_type == M_PROTO) {
		mblk_t *nmp;
		struct T_unitdata_ind *data;
		struct T_uderror_ind *ud;
		int d;

		data = (struct T_unitdata_ind *)mp->b_rptr;
		if (data->PRIM_type == T_UNITDATA_IND) {
			/* d = size difference between the two TPI headers */
			d = sizeof (*ud) - sizeof (*data);
			nmp = allocb(mp->b_wptr - mp->b_rptr + d, BPRI_HI);
			if (nmp) {
				ud = (struct T_uderror_ind *)nmp->b_rptr;
				ud->PRIM_type = T_UDERROR_IND;
				ud->DEST_length = data->SRC_length;
				ud->DEST_offset = data->SRC_offset + d;
				ud->OPT_length = data->OPT_length;
				ud->OPT_offset = data->OPT_offset + d;
				ud->ERROR_type = ENETDOWN;
				if (data->SRC_length) {
					bcopy(mp->b_rptr +
					    data->SRC_offset,
					    nmp->b_rptr +
					    ud->DEST_offset,
					    data->SRC_length);
				}
				if (data->OPT_length) {
					bcopy(mp->b_rptr +
					    data->OPT_offset,
					    nmp->b_rptr +
					    ud->OPT_offset,
					    data->OPT_length);
				}
				nmp->b_wptr += d;
				nmp->b_wptr += (mp->b_wptr - mp->b_rptr);
				nmp->b_datap->db_type = M_PROTO;
				putnext(q, nmp);
				rpcmod_send_uderr--;
			}
		}
	}
#endif
	switch (mp->b_datap->db_type) {
	default:
		putnext(q, mp);
		break;

	case M_PROTO:
	case M_PCPROTO:
		ASSERT((mp->b_wptr - mp->b_rptr) >= sizeof (int32_t));
		pptr = (union T_primitives *)mp->b_rptr;

		/*
		 * Forward this message to krpc if it is data.
		 */
		if (pptr->type == T_UNITDATA_IND) {
			mblk_t *nmp;

			/*
			 * Check if the module is being popped.
			 */
			mutex_enter(&rmp->rm_lock);
			if (rmp->rm_state & RM_CLOSING) {
				mutex_exit(&rmp->rm_lock);
				putnext(q, mp);
				break;
			}

			switch (rmp->rm_type) {
			case RPC_CLIENT:
				mutex_exit(&rmp->rm_lock);
				hdrsz = mp->b_wptr - mp->b_rptr;

				/*
				 * Make sure the header is sane: the OPT and
				 * SRC fields must lie entirely within the
				 * first mblk.
				 */
				if (hdrsz < TUNITDATAINDSZ ||
				    hdrsz < (pptr->unitdata_ind.OPT_length +
				    pptr->unitdata_ind.OPT_offset) ||
				    hdrsz < (pptr->unitdata_ind.SRC_length +
				    pptr->unitdata_ind.SRC_offset)) {
					freemsg(mp);
					return;
				}

				/*
				 * Call clnt_clts_dispatch_notify, so that it
				 * can pass the message to the proper caller.
				 * Don't discard the header just yet since the
				 * client may need the sender's address.
				 */
				clnt_clts_dispatch_notify(mp, hdrsz,
				    rmp->rm_zoneid);
				return;
			case RPC_SERVER:
				/*
				 * rm_krpc_cell is exclusively used by the kRPC
				 * CLTS server
				 */
				if (rmp->rm_krpc_cell) {
#ifdef DEBUG
					/*
					 * Test duplicate request cache and
					 * rm_ref count handling by sending a
					 * duplicate every so often, if
					 * desired.
					 */
					if (rpcmod_send_dup &&
					    rpcmod_send_dup_cnt++ %
					    rpcmod_send_dup)
						nmp = copymsg(mp);
					else
						nmp = NULL;
#endif
					/*
					 * Raise the reference count on this
					 * module to prevent it from being
					 * popped before krpc generates the
					 * reply.
					 */
					rmp->rm_ref++;
					mutex_exit(&rmp->rm_lock);

					/*
					 * Submit the message to krpc.
					 */
					svc_queuereq(q, mp);
#ifdef DEBUG
					/*
					 * Send duplicate if we created one.
					 */
					if (nmp) {
						mutex_enter(&rmp->rm_lock);
						rmp->rm_ref++;
						mutex_exit(&rmp->rm_lock);
						svc_queuereq(q, nmp);
					}
#endif
				} else {
					mutex_exit(&rmp->rm_lock);
					freemsg(mp);
				}
				return;
			default:
				mutex_exit(&rmp->rm_lock);
				freemsg(mp);
				return;
			} /* end switch(rmp->rm_type) */
		} else if (pptr->type == T_UDERROR_IND) {
			mutex_enter(&rmp->rm_lock);
			hdrsz = mp->b_wptr - mp->b_rptr;

			/*
			 * Make sure the header is sane
			 */
			if (hdrsz < TUDERRORINDSZ ||
			    hdrsz < (pptr->uderror_ind.OPT_length +
			    pptr->uderror_ind.OPT_offset) ||
			    hdrsz < (pptr->uderror_ind.DEST_length +
			    pptr->uderror_ind.DEST_offset)) {
				mutex_exit(&rmp->rm_lock);
				freemsg(mp);
				return;
			}

			/*
			 * In the case where a unit data error has been
			 * received, all we need to do is clear the message from
			 * the queue.
			 */
			mutex_exit(&rmp->rm_lock);
			freemsg(mp);
			RPCLOG(32, "rpcmodrput: unitdata error received at "
			    "%ld\n", gethrestime_sec());
			return;
		} /* end else if (pptr->type == T_UDERROR_IND) */

		putnext(q, mp);
		break;
	} /* end switch (mp->b_datap->db_type) */

	TRACE_0(TR_FAC_KRPC, TR_RPCMODRPUT_END,
	    "rpcmodrput_end:");
	/*
	 * Return codes are not looked at by the STREAMS framework.
	 */
}

/*
 * write put procedure
 */
void
rpcmodwput(queue_t *q, mblk_t *mp)
{
	struct rpcm	*rmp;

	ASSERT(q != NULL);

	/* Only M_PROTO/M_PCPROTO take the fast path below. */
	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		break;
	default:
		rpcmodwput_other(q, mp);
		return;
	}

	/*
	 * Check to see if we can send the message downstream.
	 */
	if (canputnext(q)) {
		putnext(q, mp);
		return;
	}

	rmp = (struct rpcm *)q->q_ptr;
	ASSERT(rmp != NULL);

	/*
	 * The first canputnext failed.  Try again except this time with the
	 * lock held, so that we can check the state of the stream to see if
	 * it is closing.  If either of these conditions evaluate to true
	 * then send the message.
	 */
	mutex_enter(&rmp->rm_lock);
	if (canputnext(q) || (rmp->rm_state & RM_CLOSING)) {
		mutex_exit(&rmp->rm_lock);
		putnext(q, mp);
	} else {
		/*
		 * canputnext failed again and the stream is not closing.
		 * Place the message on the queue and let the service
		 * procedure handle the message.
		 */
		mutex_exit(&rmp->rm_lock);
		(void) putq(q, mp);
	}
}

/*
 * Handle write-side messages other than M_PROTO/M_PCPROTO.  The
 * RPC_CLIENT/RPC_SERVER M_IOCTLs set the stream type and are acked here;
 * everything else is passed downstream.
 */
static void
rpcmodwput_other(queue_t *q, mblk_t *mp)
{
	struct rpcm	*rmp;
	struct iocblk	*iocp;

	rmp = (struct rpcm *)q->q_ptr;
	ASSERT(rmp != NULL);

	switch (mp->b_datap->db_type) {
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		ASSERT(iocp != NULL);
		switch (iocp->ioc_cmd) {
		case RPC_CLIENT:
		case RPC_SERVER:
			mutex_enter(&rmp->rm_lock);
			rmp->rm_type = iocp->ioc_cmd;
			mutex_exit(&rmp->rm_lock);
			mp->b_datap->db_type = M_IOCACK;
			qreply(q, mp);
			return;
		default:
			/*
			 * pass the ioctl downstream and hope someone
			 * down there knows how to handle it.
			 */
			putnext(q, mp);
			return;
		}
	default:
		break;
	}
	/*
	 * This is something we definitely do not know how to handle, just
	 * pass the message downstream
	 */
	putnext(q, mp);
}

/*
 * Module write service procedure. This is called by downstream modules
 * for back enabling during flow control.
 */
void
rpcmodwsrv(queue_t *q)
{
	struct rpcm	*rmp;
	mblk_t		*mp = NULL;

	rmp = (struct rpcm *)q->q_ptr;
	ASSERT(rmp != NULL);

	/*
	 * Get messages that may be queued and send them down stream
	 */
	while ((mp = getq(q)) != NULL) {
		/*
		 * Optimize the service procedure for the server-side, by
		 * avoiding a call to canputnext().
		 */
		if (rmp->rm_type == RPC_SERVER || canputnext(q)) {
			putnext(q, mp);
			continue;
		}
		/* Still flow-controlled: requeue and wait for back-enable. */
		(void) putbq(q, mp);
		return;
	}
}

/*
 * Release one reference on the CLTS rpcmod instance, freeing the message
 * (if any) that kRPC is done with.  When the last reference drops while the
 * module is being popped, wake the thread blocked in close.
 */
static void
rpcmod_release(queue_t *q, mblk_t *bp)
{
	struct rpcm *rmp;

	/*
	 * For now, just free the message.
	 */
	if (bp)
		freemsg(bp);
	rmp = (struct rpcm *)q->q_ptr;

	mutex_enter(&rmp->rm_lock);
	rmp->rm_ref--;

	if (rmp->rm_ref == 0 && (rmp->rm_state & RM_CLOSING)) {
		cv_broadcast(&rmp->rm_cwait);
	}

	mutex_exit(&rmp->rm_lock);
}

/*
 * This part of rpcmod is pushed on a connection-oriented transport for use
 * by RPC.  It serves to bypass the Stream head, implements
 * the record marking protocol, and dispatches incoming RPC messages.
 */

/* Default idle timer values */
#define	MIR_CLNT_IDLE_TIMEOUT	(5 * (60 * 1000L))	/* 5 minutes */
#define	MIR_SVC_IDLE_TIMEOUT	(6 * (60 * 1000L))	/* 6 minutes */
#define	MIR_SVC_ORDREL_TIMEOUT	(10 * (60 * 1000L))	/* 10 minutes */
#define	MIR_LASTFRAG	0x80000000	/* Record marker */

/* Total data length of a message, cheap when it is a single mblk. */
#define	DLEN(mp) (mp->b_cont ? msgdsize(mp) : (mp->b_wptr - mp->b_rptr))

/*
 * Per-stream state for the connection-oriented (record-marking) half of
 * rpcmod.  One of these hangs off q_ptr for each pushed instance.
 */
typedef struct mir_s {
	void	*mir_krpc_cell;	/* Reserved for KRPC use. This field */
					/* must be first in the structure. */
	struct xprt_style_ops	*rm_ops;
	int	mir_type;		/* Client or server side stream */

	mblk_t	*mir_head_mp;		/* RPC msg in progress */
		/*
		 * mir_head_mp points to the first mblk being collected in
		 * the current RPC message.  Record headers are removed
		 * before data is linked into mir_head_mp.
		 */
	mblk_t	*mir_tail_mp;		/* Last mblk in mir_head_mp */
		/*
		 * mir_tail_mp points to the last mblk in the message
		 * chain starting at mir_head_mp.  It is only valid
		 * if mir_head_mp is non-NULL and is used to add new
		 * data blocks to the end of chain quickly.
		 */

	int32_t	mir_frag_len;		/* Bytes seen in the current frag */
		/*
		 * mir_frag_len starts at -4 for beginning of each fragment.
		 * When this length is negative, it indicates the number of
		 * bytes that rpcmod needs to complete the record marker
		 * header.  When it is positive or zero, it holds the number
		 * of bytes that have arrived for the current fragment and
		 * are held in mir_header_mp.
		 */

	int32_t	mir_frag_header;
		/*
		 * Fragment header as collected for the current fragment.
		 * It holds the last-fragment indicator and the number
		 * of bytes in the fragment.
		 */

	unsigned int
		mir_ordrel_pending : 1,	/* Sent T_ORDREL_REQ */
		mir_hold_inbound : 1,	/* Hold inbound messages on server */
					/* side until outbound flow control */
					/* is relieved. */
		mir_closing : 1,	/* The stream is being closed */
		mir_inrservice : 1,	/* data queued or rd srv proc running */
		mir_inwservice : 1,	/* data queued or wr srv proc running */
		mir_inwflushdata : 1,	/* flush M_DATAs when srv runs */
		/*
		 * On client streams, mir_clntreq is 0 or 1; it is set
		 * to 1 whenever a new request is sent out (mir_wput)
		 * and cleared when the timer fires (mir_timer).  If
		 * the timer fires with this value equal to 0, then the
		 * stream is considered idle and KRPC is notified.
		 */
		mir_clntreq : 1,
		/*
		 * On server streams, stop accepting messages
		 */
		mir_svc_no_more_msgs : 1,
		mir_listen_stream : 1,	/* listen end point */
		mir_unused : 1,		/* no longer used */
		mir_timer_call : 1,
		mir_junk_fill_thru_bit_31 : 21;

	int	mir_setup_complete;	/* server has initialized everything */
	timeout_id_t mir_timer_id;	/* Timer for idle checks */
	clock_t	mir_idle_timeout;	/* Allowed idle time before shutdown */
		/*
		 * This value is copied from clnt_idle_timeout or
		 * svc_idle_timeout during the appropriate ioctl.
		 * Kept in milliseconds
		 */
	clock_t	mir_use_timestamp;	/* updated on client with each use */
		/*
		 * This value is set to lbolt
		 * every time a client stream sends or receives data.
		 * Even if the timer message arrives, we don't shutdown
		 * client unless:
		 *    lbolt >= MSEC_TO_TICK(mir_idle_timeout)+mir_use_timestamp.
		 * This value is kept in HZ.
		 */

	uint_t	*mir_max_msg_sizep;	/* Reference to sanity check size */
		/*
		 * This pointer is set to &clnt_max_msg_size or
		 * &svc_max_msg_size during the appropriate ioctl.
		 */
	zoneid_t mir_zoneid;	/* zone which pushed rpcmod */
	/* Server-side fields. */
	int	mir_ref_cnt;		/* Reference count: server side only */
					/* counts the number of references */
					/* that a kernel RPC server thread */
					/* (see svc_run()) has on this rpcmod */
					/* slot.  Effectively, it is the */
					/* number of unprocessed messages */
					/* that have been passed up to the */
					/* KRPC layer */

	mblk_t	*mir_svc_pend_mp;	/* Pending T_ORDREL_IND or */
					/* T_DISCON_IND */

	/*
	 * these fields are for both client and server, but for debugging,
	 * it is easier to have these last in the structure.
	 */
	kmutex_t	mir_mutex;	/* Mutex and condvar for close */
	kcondvar_t	mir_condvar;	/* synchronization. */
	kcondvar_t	mir_timer_cv;	/* Timer routine sync. */
} mir_t;

/* True when no kRPC threads hold references and the read side is idle. */
#define	MIR_SVC_QUIESCED(mir)	\
	(mir->mir_ref_cnt == 0 && mir->mir_inrservice == 0)

#define	MIR_CLEAR_INRSRV(mir_ptr)	{	\
	(mir_ptr)->mir_inrservice = 0;	\
	if ((mir_ptr)->mir_type == RPC_SERVER &&	\
		(mir_ptr)->mir_closing)	\
		cv_signal(&(mir_ptr)->mir_condvar);	\
}

/*
 * Don't block service procedure (and mir_close) if
 * we are in the process of closing.
 */
#define	MIR_WCANPUTNEXT(mir_ptr, write_q)	\
	(canputnext(write_q) || ((mir_ptr)->mir_svc_no_more_msgs == 1))

static int	mir_clnt_dup_request(queue_t *q, mblk_t *mp);
static void	mir_rput_proto(queue_t *q, mblk_t *mp);
static int	mir_svc_policy_notify(queue_t *q, int event);
static void	mir_svc_release(queue_t *wq, mblk_t *mp);
static void	mir_svc_start(queue_t *wq);
static void	mir_svc_idle_start(queue_t *, mir_t *);
static void	mir_svc_idle_stop(queue_t *, mir_t *);
static void	mir_svc_start_close(queue_t *, mir_t *);
static void	mir_clnt_idle_do_stop(queue_t *);
static void	mir_clnt_idle_stop(queue_t *, mir_t *);
static void	mir_clnt_idle_start(queue_t *, mir_t *);
static void	mir_wput(queue_t *q, mblk_t *mp);
static void	mir_wput_other(queue_t *q, mblk_t *mp);
static void	mir_wsrv(queue_t *q);
static void	mir_disconnect(queue_t *, mir_t *ir);
static int	mir_check_len(queue_t *, int32_t, mblk_t *);
static void	mir_timer(void *);

extern void	(*mir_rele)(queue_t *, mblk_t *);
extern void	(*mir_start)(queue_t *);
extern void	(*clnt_stop_idle)(queue_t *);

/* Tunable idle timeouts, in milliseconds. */
clock_t	clnt_idle_timeout = MIR_CLNT_IDLE_TIMEOUT;
clock_t	svc_idle_timeout = MIR_SVC_IDLE_TIMEOUT;

/*
 * Timeout for subsequent notifications of idle connection.  This is
 * typically used to clean up after a wedged orderly release.
 */
clock_t	svc_ordrel_timeout = MIR_SVC_ORDREL_TIMEOUT;	/* milliseconds */

extern	uint_t	*clnt_max_msg_sizep;
extern	uint_t	*svc_max_msg_sizep;
uint_t	clnt_max_msg_size = RPC_MAXDATASIZE;
uint_t	svc_max_msg_size = RPC_MAXDATASIZE;
uint_t	mir_krpc_cell_null;

/*
 * Cancel the pending idle timer, if any.  Called with mir_mutex held;
 * the lock is dropped around untimeout() (which may sleep waiting for a
 * running mir_timer() to finish), so the mir_timer_call/mir_timer_cv
 * handshake below keeps other threads from touching mir_timer_id meanwhile.
 */
static void
mir_timer_stop(mir_t *mir)
{
	timeout_id_t tid;

	ASSERT(MUTEX_HELD(&mir->mir_mutex));

	/*
	 * Since the mir_mutex lock needs to be released to call
	 * untimeout(), we need to make sure that no other thread
	 * can start/stop the timer (changing mir_timer_id) during
	 * that time.  The mir_timer_call bit and the mir_timer_cv
	 * condition variable are used to synchronize this.  Setting
	 * mir_timer_call also tells mir_timer() (refer to the comments
	 * in mir_timer()) that it does not need to do anything.
	 */
	while (mir->mir_timer_call)
		cv_wait(&mir->mir_timer_cv, &mir->mir_mutex);
	mir->mir_timer_call = B_TRUE;

	if ((tid = mir->mir_timer_id) != 0) {
		mir->mir_timer_id = 0;
		mutex_exit(&mir->mir_mutex);
		(void) untimeout(tid);
		mutex_enter(&mir->mir_mutex);
	}
	mir->mir_timer_call = B_FALSE;
	cv_broadcast(&mir->mir_timer_cv);
}

/*
 * (Re)arm the idle timer to fire after intrvl milliseconds, cancelling any
 * previous timeout first.  Called with mir_mutex held; uses the same
 * mir_timer_call handshake as mir_timer_stop() while the lock is dropped
 * around untimeout().  No new timer is started once the stream is closing.
 */
static void
mir_timer_start(queue_t *q, mir_t *mir, clock_t intrvl)
{
	timeout_id_t tid;

	ASSERT(MUTEX_HELD(&mir->mir_mutex));

	while (mir->mir_timer_call)
		cv_wait(&mir->mir_timer_cv, &mir->mir_mutex);
	mir->mir_timer_call = B_TRUE;

	if ((tid = mir->mir_timer_id) != 0) {
		mutex_exit(&mir->mir_mutex);
		(void) untimeout(tid);
		mutex_enter(&mir->mir_mutex);
	}
	/* Only start the timer when it is not closing. */
	if (!mir->mir_closing) {
		mir->mir_timer_id = timeout(mir_timer, q,
		    MSEC_TO_TICK(intrvl));
	}
	mir->mir_timer_call = B_FALSE;
	cv_broadcast(&mir->mir_timer_cv);
}

/*
 * Return 1 if a request with the same XID as mp is already sitting on the
 * flow-controlled write queue, 0 otherwise.  The XID is read from bytes
 * 4..7 of the record (big-endian).  Caller holds mir_mutex.
 */
static int
mir_clnt_dup_request(queue_t *q, mblk_t *mp)
{
	mblk_t *mp1;
	uint32_t new_xid;
	uint32_t old_xid;

	ASSERT(MUTEX_HELD(&((mir_t *)q->q_ptr)->mir_mutex));
	new_xid = BE32_TO_U32(&mp->b_rptr[4]);
	/*
	 * This loop is a bit tacky -- it walks the STREAMS list of
	 * flow-controlled messages.
	 */
	if ((mp1 = q->q_first) != NULL) {
		do {
			old_xid = BE32_TO_U32(&mp1->b_rptr[4]);
			if (new_xid == old_xid)
				return (1);
		} while ((mp1 = mp1->b_next) != NULL);
	}
	return (0);
}

/*
 * STREAMS close routine for the connection-oriented half of rpcmod.  Frees
 * any partially assembled RPC message, stops the idle timer, and on server
 * streams waits until kRPC has released every outstanding reference before
 * tearing the instance down.
 */
static int
mir_close(queue_t *q)
{
	mir_t	*mir;
	mblk_t	*mp;
	bool_t queue_cleaned = FALSE;

	RPCLOG(32, "rpcmod: mir_close of q 0x%p\n", (void *)q);
	mir = (mir_t *)q->q_ptr;
	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
	mutex_enter(&mir->mir_mutex);
	if ((mp = mir->mir_head_mp) != NULL) {
		mir->mir_head_mp = (mblk_t *)0;
		freemsg(mp);
	}
	/*
	 * Set mir_closing so we get notified when MIR_SVC_QUIESCED()
	 * is TRUE.  And mir_timer_start() won't start the timer again.
	 */
	mir->mir_closing = B_TRUE;
	mir_timer_stop(mir);

	if (mir->mir_type == RPC_SERVER) {
		flushq(q, FLUSHDATA);	/* Ditch anything waiting on read q */

		/*
		 * This will prevent more requests from arriving and
		 * will force rpcmod to ignore flow control.
		 */
		mir_svc_start_close(WR(q), mir);

		while ((!MIR_SVC_QUIESCED(mir)) || mir->mir_inwservice == 1) {

			if (mir->mir_ref_cnt && !mir->mir_inrservice &&
			    (queue_cleaned == FALSE)) {
				/*
				 * call into SVC to clean the queue
				 */
				mutex_exit(&mir->mir_mutex);
				svc_queueclean(q);
				queue_cleaned = TRUE;
				mutex_enter(&mir->mir_mutex);
				continue;
			}

			/*
			 * Bugid 1253810 - Force the write service
			 * procedure to send its messages, regardless
			 * whether the downstream module is ready
			 * to accept data.
			 */
			if (mir->mir_inwservice == 1)
				qenable(WR(q));

			cv_wait(&mir->mir_condvar, &mir->mir_mutex);
		}

		mutex_exit(&mir->mir_mutex);
		qprocsoff(q);

		/* Notify KRPC that this stream is going away. */
		svc_queueclose(q);
	} else {
		mutex_exit(&mir->mir_mutex);
		qprocsoff(q);
	}

	mutex_destroy(&mir->mir_mutex);
	cv_destroy(&mir->mir_condvar);
	cv_destroy(&mir->mir_timer_cv);
	kmem_free(mir, sizeof (mir_t));
	return (0);
}

/*
 * This is server side only (RPC_SERVER).
 *
 * Exit idle mode.
 */
static void
mir_svc_idle_stop(queue_t *q, mir_t *mir)
{
	ASSERT(MUTEX_HELD(&mir->mir_mutex));
	ASSERT((q->q_flag & QREADR) == 0);
	ASSERT(mir->mir_type == RPC_SERVER);
	RPCLOG(16, "rpcmod: mir_svc_idle_stop of q 0x%p\n", (void *)q);

	/* Cancel the pending idle timer; mir_mutex is held by the caller. */
	mir_timer_stop(mir);
}

/*
 * This is server side only (RPC_SERVER).
 *
 * Start idle processing, which will include setting idle timer if the
 * stream is not being closed.
 */
static void
mir_svc_idle_start(queue_t *q, mir_t *mir)
{
	ASSERT(MUTEX_HELD(&mir->mir_mutex));
	ASSERT((q->q_flag & QREADR) == 0);
	ASSERT(mir->mir_type == RPC_SERVER);
	RPCLOG(16, "rpcmod: mir_svc_idle_start q 0x%p\n", (void *)q);

	/*
	 * Don't re-start idle timer if we are closing queues.
	 */
	if (mir->mir_closing) {
		RPCLOG(16, "mir_svc_idle_start - closing: 0x%p\n",
		    (void *)q);

		/*
		 * We will call mir_svc_idle_start() whenever MIR_SVC_QUIESCED()
		 * is true.  When it is true, and we are in the process of
		 * closing the stream, signal any thread waiting in
		 * mir_close().
		 */
		if (mir->mir_inwservice == 0)
			cv_signal(&mir->mir_condvar);

	} else {
		RPCLOG(16, "mir_svc_idle_start - reset %s timer\n",
		    mir->mir_ordrel_pending ? "ordrel" : "normal");
		/*
		 * Normal condition, start the idle timer.  If an orderly
		 * release has been sent, set the timeout to wait for the
		 * client to close its side of the connection.  Otherwise,
		 * use the normal idle timeout.
		 */
		mir_timer_start(q, mir, mir->mir_ordrel_pending ?
		    svc_ordrel_timeout : mir->mir_idle_timeout);
	}
}

/*
 * STREAMS open routine for this module.  Allocates and initializes the
 * per-stream mir_t, attaches it to both the read- and write-side queues,
 * and turns on queue processing.  The stream's role (RPC_CLIENT or
 * RPC_SERVER) is established later by ioctl; until then inbound messages
 * are held on the read queue (see mir_hold_inbound below).
 * Always succeeds (returns 0); the KM_SLEEP allocation cannot fail.
 */
/* ARGSUSED */
static int
mir_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	mir_t	*mir;

	RPCLOG(32, "rpcmod: mir_open of q 0x%p\n", (void *)q);
	/*
	 * Set variables used directly by KRPC.  These are global hooks
	 * through which KRPC calls back into rpcmod; each is assigned
	 * only if it has not already been set (i.e. on the first open).
	 */
	if (!mir_rele)
		mir_rele = mir_svc_release;
	if (!mir_start)
		mir_start = mir_svc_start;
	if (!clnt_stop_idle)
		clnt_stop_idle = mir_clnt_idle_do_stop;
	if (!clnt_max_msg_sizep)
		clnt_max_msg_sizep = &clnt_max_msg_size;
	if (!svc_max_msg_sizep)
		svc_max_msg_sizep = &svc_max_msg_size;

	/* Allocate a zero'ed out mir structure for this stream. */
	mir = kmem_zalloc(sizeof (mir_t), KM_SLEEP);

	/*
	 * We set hold inbound here so that incoming messages will
	 * be held on the read-side queue until the stream is completely
	 * initialized with a RPC_CLIENT or RPC_SERVER ioctl.  During
	 * the ioctl processing, the flag is cleared and any messages that
	 * arrived between the open and the ioctl are delivered to KRPC.
	 *
	 * Early data should never arrive on a client stream since
	 * servers only respond to our requests and we do not send any
	 * until after the stream is initialized.  Early data is
	 * very common on a server stream where the client will start
	 * sending data as soon as the connection is made (and this
	 * is especially true with TCP where the protocol accepts the
	 * connection before nfsd or KRPC is notified about it).
	 */

	mir->mir_hold_inbound = 1;

	/*
	 * Start the record marker looking for a 4-byte header.  When
	 * this length is negative, it indicates that rpcmod is looking
	 * for bytes to consume for the record marker header.  When it
	 * is positive, it holds the number of bytes that have arrived
	 * for the current fragment and are being held in mir_header_mp.
	 */

	mir->mir_frag_len = -(int32_t)sizeof (uint32_t);

	mir->mir_zoneid = rpc_zoneid();
	mutex_init(&mir->mir_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mir->mir_condvar, NULL, CV_DRIVER, NULL);
	cv_init(&mir->mir_timer_cv, NULL, CV_DRIVER, NULL);

	/* Both halves of the stream share the same per-stream mir_t. */
	q->q_ptr = (char *)mir;
	WR(q)->q_ptr = (char *)mir;

	/*
	 * We noenable the read-side queue because we don't want it
	 * automatically enabled by putq.  We enable it explicitly
	 * in mir_wsrv when appropriate.  (See additional comments on
	 * flow control at the beginning of mir_rsrv.)
	 */
	noenable(q);

	qprocson(q);
	return (0);
}

/*
 * Read-side put routine for both the client and server side.  Does the
 * record marking for incoming RPC messages, and when complete, dispatches
 * the message to either the client or server.
 */
static void
mir_do_rput(queue_t *q, mblk_t *mp, int srv)
{
	mblk_t	*cont_mp;
	int	excess;
	int32_t	frag_len;
	int32_t	frag_header;
	mblk_t	*head_mp;
	int	len;
	mir_t	*mir;
	mblk_t	*mp1;
	unsigned char	*rptr;
	mblk_t	*tail_mp;
	unsigned char	*wptr;
	boolean_t	stop_timer = B_FALSE;

	mir = (mir_t *)q->q_ptr;
	ASSERT(mir != NULL);

	/*
	 * If the stream has not been set up as a RPC_CLIENT or RPC_SERVER
	 * with the corresponding ioctl, then don't accept
	 * any inbound data.  This should never happen for streams
	 * created by nfsd or client-side KRPC because they are careful
	 * to set the mode of the stream before doing anything else.
14337c478bd9Sstevel@tonic-gate */ 14347c478bd9Sstevel@tonic-gate if (mir->mir_type == 0) { 14357c478bd9Sstevel@tonic-gate freemsg(mp); 14367c478bd9Sstevel@tonic-gate return; 14377c478bd9Sstevel@tonic-gate } 14387c478bd9Sstevel@tonic-gate 14397c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex)); 14407c478bd9Sstevel@tonic-gate 14417c478bd9Sstevel@tonic-gate switch (mp->b_datap->db_type) { 14427c478bd9Sstevel@tonic-gate case M_DATA: 14437c478bd9Sstevel@tonic-gate break; 14447c478bd9Sstevel@tonic-gate case M_PROTO: 14457c478bd9Sstevel@tonic-gate case M_PCPROTO: 14467c478bd9Sstevel@tonic-gate rptr = mp->b_rptr; 14477c478bd9Sstevel@tonic-gate if (mp->b_wptr - rptr < sizeof (uint32_t)) { 14487c478bd9Sstevel@tonic-gate RPCLOG(1, "mir_rput: runt TPI message (%d bytes)\n", 14497c478bd9Sstevel@tonic-gate (int)(mp->b_wptr - rptr)); 14507c478bd9Sstevel@tonic-gate freemsg(mp); 14517c478bd9Sstevel@tonic-gate return; 14527c478bd9Sstevel@tonic-gate } 14537c478bd9Sstevel@tonic-gate if (((union T_primitives *)rptr)->type != T_DATA_IND) { 14547c478bd9Sstevel@tonic-gate mir_rput_proto(q, mp); 14557c478bd9Sstevel@tonic-gate return; 14567c478bd9Sstevel@tonic-gate } 14577c478bd9Sstevel@tonic-gate 14587c478bd9Sstevel@tonic-gate /* Throw away the T_DATA_IND block and continue with data. */ 14597c478bd9Sstevel@tonic-gate mp1 = mp; 14607c478bd9Sstevel@tonic-gate mp = mp->b_cont; 14617c478bd9Sstevel@tonic-gate freeb(mp1); 14627c478bd9Sstevel@tonic-gate break; 14637c478bd9Sstevel@tonic-gate case M_SETOPTS: 14647c478bd9Sstevel@tonic-gate /* 14657c478bd9Sstevel@tonic-gate * If a module on the stream is trying set the Stream head's 14667c478bd9Sstevel@tonic-gate * high water mark, then set our hiwater to the requested 14677c478bd9Sstevel@tonic-gate * value. We are the "stream head" for all inbound 14687c478bd9Sstevel@tonic-gate * data messages since messages are passed directly to KRPC. 
14697c478bd9Sstevel@tonic-gate */ 14707c478bd9Sstevel@tonic-gate if ((mp->b_wptr - mp->b_rptr) >= sizeof (struct stroptions)) { 14717c478bd9Sstevel@tonic-gate struct stroptions *stropts; 14727c478bd9Sstevel@tonic-gate 14737c478bd9Sstevel@tonic-gate stropts = (struct stroptions *)mp->b_rptr; 14747c478bd9Sstevel@tonic-gate if ((stropts->so_flags & SO_HIWAT) && 14757c478bd9Sstevel@tonic-gate !(stropts->so_flags & SO_BAND)) { 14767c478bd9Sstevel@tonic-gate (void) strqset(q, QHIWAT, 0, stropts->so_hiwat); 14777c478bd9Sstevel@tonic-gate } 14787c478bd9Sstevel@tonic-gate } 14797c478bd9Sstevel@tonic-gate putnext(q, mp); 14807c478bd9Sstevel@tonic-gate return; 14817c478bd9Sstevel@tonic-gate case M_FLUSH: 14827c478bd9Sstevel@tonic-gate RPCLOG(32, "mir_do_rput: ignoring M_FLUSH on q 0x%p. ", 14837c478bd9Sstevel@tonic-gate (void *)q); 14847c478bd9Sstevel@tonic-gate RPCLOG(32, "M_FLUSH is %x\n", (uint_t)*mp->b_rptr); 14857c478bd9Sstevel@tonic-gate 14867c478bd9Sstevel@tonic-gate putnext(q, mp); 14877c478bd9Sstevel@tonic-gate return; 14887c478bd9Sstevel@tonic-gate default: 14897c478bd9Sstevel@tonic-gate putnext(q, mp); 14907c478bd9Sstevel@tonic-gate return; 14917c478bd9Sstevel@tonic-gate } 14927c478bd9Sstevel@tonic-gate 14937c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 14947c478bd9Sstevel@tonic-gate 14957c478bd9Sstevel@tonic-gate /* 14967c478bd9Sstevel@tonic-gate * If this connection is closing, don't accept any new messages. 14977c478bd9Sstevel@tonic-gate */ 14987c478bd9Sstevel@tonic-gate if (mir->mir_svc_no_more_msgs) { 14997c478bd9Sstevel@tonic-gate ASSERT(mir->mir_type == RPC_SERVER); 15007c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 15017c478bd9Sstevel@tonic-gate freemsg(mp); 15027c478bd9Sstevel@tonic-gate return; 15037c478bd9Sstevel@tonic-gate } 15047c478bd9Sstevel@tonic-gate 15057c478bd9Sstevel@tonic-gate /* Get local copies for quicker access. 
*/ 15067c478bd9Sstevel@tonic-gate frag_len = mir->mir_frag_len; 15077c478bd9Sstevel@tonic-gate frag_header = mir->mir_frag_header; 15087c478bd9Sstevel@tonic-gate head_mp = mir->mir_head_mp; 15097c478bd9Sstevel@tonic-gate tail_mp = mir->mir_tail_mp; 15107c478bd9Sstevel@tonic-gate 15117c478bd9Sstevel@tonic-gate /* Loop, processing each message block in the mp chain separately. */ 15127c478bd9Sstevel@tonic-gate do { 15137c478bd9Sstevel@tonic-gate /* 15147c478bd9Sstevel@tonic-gate * cont_mp is used in the do/while condition below to 15157c478bd9Sstevel@tonic-gate * walk to the next block in the STREAMS message. 15167c478bd9Sstevel@tonic-gate * mp->b_cont may be nil'ed during processing so we 15177c478bd9Sstevel@tonic-gate * can't rely on it to find the next block. 15187c478bd9Sstevel@tonic-gate */ 15197c478bd9Sstevel@tonic-gate cont_mp = mp->b_cont; 15207c478bd9Sstevel@tonic-gate 15217c478bd9Sstevel@tonic-gate /* 15227c478bd9Sstevel@tonic-gate * Get local copies of rptr and wptr for our processing. 15237c478bd9Sstevel@tonic-gate * These always point into "mp" (the current block being 15247c478bd9Sstevel@tonic-gate * processed), but rptr is updated as we consume any 15257c478bd9Sstevel@tonic-gate * record header in this message, and wptr is updated to 15267c478bd9Sstevel@tonic-gate * point to the end of the data for the current fragment, 15277c478bd9Sstevel@tonic-gate * if it ends in this block. The main point is that 15287c478bd9Sstevel@tonic-gate * they are not always the same as b_rptr and b_wptr. 15297c478bd9Sstevel@tonic-gate * b_rptr and b_wptr will be updated when appropriate. 
15307c478bd9Sstevel@tonic-gate */ 15317c478bd9Sstevel@tonic-gate rptr = mp->b_rptr; 15327c478bd9Sstevel@tonic-gate wptr = mp->b_wptr; 15337c478bd9Sstevel@tonic-gate same_mblk:; 15347c478bd9Sstevel@tonic-gate len = (int)(wptr - rptr); 15357c478bd9Sstevel@tonic-gate if (len <= 0) { 15367c478bd9Sstevel@tonic-gate /* 15377c478bd9Sstevel@tonic-gate * If we have processed all of the data in the message 15387c478bd9Sstevel@tonic-gate * or the block is empty to begin with, then we're 15397c478bd9Sstevel@tonic-gate * done with this block and can go on to cont_mp, 15407c478bd9Sstevel@tonic-gate * if there is one. 15417c478bd9Sstevel@tonic-gate * 15427c478bd9Sstevel@tonic-gate * First, we check to see if the current block is 15437c478bd9Sstevel@tonic-gate * now zero-length and, if so, we free it. 15447c478bd9Sstevel@tonic-gate * This happens when either the block was empty 15457c478bd9Sstevel@tonic-gate * to begin with or we consumed all of the data 15467c478bd9Sstevel@tonic-gate * for the record marking header. 15477c478bd9Sstevel@tonic-gate */ 15487c478bd9Sstevel@tonic-gate if (rptr <= mp->b_rptr) { 15497c478bd9Sstevel@tonic-gate /* 15507c478bd9Sstevel@tonic-gate * If head_mp is non-NULL, add cont_mp to the 15517c478bd9Sstevel@tonic-gate * mblk list. 
XXX But there is a possibility 15527c478bd9Sstevel@tonic-gate * that tail_mp = mp or even head_mp = mp XXX 15537c478bd9Sstevel@tonic-gate */ 15547c478bd9Sstevel@tonic-gate if (head_mp) { 15557c478bd9Sstevel@tonic-gate if (head_mp == mp) 15567c478bd9Sstevel@tonic-gate head_mp = NULL; 15577c478bd9Sstevel@tonic-gate else if (tail_mp != mp) { 15587c478bd9Sstevel@tonic-gate ASSERT((tail_mp->b_cont == NULL) || (tail_mp->b_cont == mp)); 15597c478bd9Sstevel@tonic-gate tail_mp->b_cont = cont_mp; 15607c478bd9Sstevel@tonic-gate /* 15617c478bd9Sstevel@tonic-gate * It's possible that, because 15627c478bd9Sstevel@tonic-gate * of a very short mblk (0-3 15637c478bd9Sstevel@tonic-gate * bytes), we've ended up here 15647c478bd9Sstevel@tonic-gate * and that cont_mp could be 15657c478bd9Sstevel@tonic-gate * NULL (if we're at the end 15667c478bd9Sstevel@tonic-gate * of an mblk chain). If so, 15677c478bd9Sstevel@tonic-gate * don't set tail_mp to 15687c478bd9Sstevel@tonic-gate * cont_mp, because the next 15697c478bd9Sstevel@tonic-gate * time we access it, we'll 15707c478bd9Sstevel@tonic-gate * dereference a NULL pointer 15717c478bd9Sstevel@tonic-gate * and crash. Just leave 15727c478bd9Sstevel@tonic-gate * tail_mp pointing at the 15737c478bd9Sstevel@tonic-gate * current end of chain. 15747c478bd9Sstevel@tonic-gate */ 15757c478bd9Sstevel@tonic-gate if (cont_mp) 15767c478bd9Sstevel@tonic-gate tail_mp = cont_mp; 15777c478bd9Sstevel@tonic-gate } else { 15787c478bd9Sstevel@tonic-gate mblk_t *smp = head_mp; 15797c478bd9Sstevel@tonic-gate 15807c478bd9Sstevel@tonic-gate while ((smp->b_cont != NULL) && 15817c478bd9Sstevel@tonic-gate (smp->b_cont != mp)) 15827c478bd9Sstevel@tonic-gate smp = smp->b_cont; 15837c478bd9Sstevel@tonic-gate smp->b_cont = cont_mp; 15847c478bd9Sstevel@tonic-gate /* 15857c478bd9Sstevel@tonic-gate * Don't set tail_mp to cont_mp 15867c478bd9Sstevel@tonic-gate * if it's NULL. 
Instead, set 15877c478bd9Sstevel@tonic-gate * tail_mp to smp, which is the 15887c478bd9Sstevel@tonic-gate * end of the chain starting 15897c478bd9Sstevel@tonic-gate * at head_mp. 15907c478bd9Sstevel@tonic-gate */ 15917c478bd9Sstevel@tonic-gate if (cont_mp) 15927c478bd9Sstevel@tonic-gate tail_mp = cont_mp; 15937c478bd9Sstevel@tonic-gate else 15947c478bd9Sstevel@tonic-gate tail_mp = smp; 15957c478bd9Sstevel@tonic-gate } 15967c478bd9Sstevel@tonic-gate } 15977c478bd9Sstevel@tonic-gate freeb(mp); 15987c478bd9Sstevel@tonic-gate } 15997c478bd9Sstevel@tonic-gate continue; 16007c478bd9Sstevel@tonic-gate } 16017c478bd9Sstevel@tonic-gate 16027c478bd9Sstevel@tonic-gate /* 16037c478bd9Sstevel@tonic-gate * frag_len starts at -4 and is incremented past the record 16047c478bd9Sstevel@tonic-gate * marking header to 0, and then becomes positive as real data 16057c478bd9Sstevel@tonic-gate * bytes are received for the message. While frag_len is less 16067c478bd9Sstevel@tonic-gate * than zero, we need more bytes for the record marking 16077c478bd9Sstevel@tonic-gate * header. 16087c478bd9Sstevel@tonic-gate */ 16097c478bd9Sstevel@tonic-gate if (frag_len < 0) { 16107c478bd9Sstevel@tonic-gate uchar_t *up = rptr; 16117c478bd9Sstevel@tonic-gate /* 16127c478bd9Sstevel@tonic-gate * Collect as many bytes as we need for the record 16137c478bd9Sstevel@tonic-gate * marking header and that are available in this block. 
16147c478bd9Sstevel@tonic-gate */ 16157c478bd9Sstevel@tonic-gate do { 16167c478bd9Sstevel@tonic-gate --len; 16177c478bd9Sstevel@tonic-gate frag_len++; 16187c478bd9Sstevel@tonic-gate frag_header <<= 8; 16197c478bd9Sstevel@tonic-gate frag_header += (*up++ & 0xFF); 16207c478bd9Sstevel@tonic-gate } while (len > 0 && frag_len < 0); 16217c478bd9Sstevel@tonic-gate 16227c478bd9Sstevel@tonic-gate if (rptr == mp->b_rptr) { 16237c478bd9Sstevel@tonic-gate /* 16247c478bd9Sstevel@tonic-gate * The record header is located at the 16257c478bd9Sstevel@tonic-gate * beginning of the block, so just walk 16267c478bd9Sstevel@tonic-gate * b_rptr past it. 16277c478bd9Sstevel@tonic-gate */ 16287c478bd9Sstevel@tonic-gate mp->b_rptr = rptr = up; 16297c478bd9Sstevel@tonic-gate } else { 16307c478bd9Sstevel@tonic-gate /* 16317c478bd9Sstevel@tonic-gate * The record header is located in the middle 16327c478bd9Sstevel@tonic-gate * of a block, so copy any remaining data up. 16337c478bd9Sstevel@tonic-gate * This happens when an RPC message is 16347c478bd9Sstevel@tonic-gate * fragmented into multiple pieces and 16357c478bd9Sstevel@tonic-gate * a middle (or end) fragment immediately 16367c478bd9Sstevel@tonic-gate * follows a previous fragment in the same 16377c478bd9Sstevel@tonic-gate * message block. 
16387c478bd9Sstevel@tonic-gate */ 16397c478bd9Sstevel@tonic-gate wptr = &rptr[len]; 16407c478bd9Sstevel@tonic-gate mp->b_wptr = wptr; 16417c478bd9Sstevel@tonic-gate if (len) { 16427c478bd9Sstevel@tonic-gate RPCLOG(32, "mir_do_rput: copying %d " 16437c478bd9Sstevel@tonic-gate "bytes of data up", len); 16447c478bd9Sstevel@tonic-gate RPCLOG(32, " db_ref %d\n", 16457c478bd9Sstevel@tonic-gate (uint_t)mp->b_datap->db_ref); 16467c478bd9Sstevel@tonic-gate bcopy(up, rptr, len); 16477c478bd9Sstevel@tonic-gate } 16487c478bd9Sstevel@tonic-gate } 16497c478bd9Sstevel@tonic-gate 16507c478bd9Sstevel@tonic-gate /* 16517c478bd9Sstevel@tonic-gate * If we haven't received the complete record header 16527c478bd9Sstevel@tonic-gate * yet, then loop around to get the next block in the 16537c478bd9Sstevel@tonic-gate * STREAMS message. The logic at same_mblk label will 16547c478bd9Sstevel@tonic-gate * free the current block if it has become empty. 16557c478bd9Sstevel@tonic-gate */ 16567c478bd9Sstevel@tonic-gate if (frag_len < 0) { 16577c478bd9Sstevel@tonic-gate RPCLOG(32, "mir_do_rput: frag_len is still < 0 " 16587c478bd9Sstevel@tonic-gate "(%d)", len); 16597c478bd9Sstevel@tonic-gate goto same_mblk; 16607c478bd9Sstevel@tonic-gate } 16617c478bd9Sstevel@tonic-gate 16627c478bd9Sstevel@tonic-gate #ifdef RPCDEBUG 16637c478bd9Sstevel@tonic-gate if ((frag_header & MIR_LASTFRAG) == 0) { 16647c478bd9Sstevel@tonic-gate RPCLOG0(32, "mir_do_rput: multi-fragment " 16657c478bd9Sstevel@tonic-gate "record\n"); 16667c478bd9Sstevel@tonic-gate } 16677c478bd9Sstevel@tonic-gate { 16687c478bd9Sstevel@tonic-gate uint_t l = frag_header & ~MIR_LASTFRAG; 16697c478bd9Sstevel@tonic-gate 16707c478bd9Sstevel@tonic-gate if (l != 0 && mir->mir_max_msg_sizep && 16717c478bd9Sstevel@tonic-gate l >= *mir->mir_max_msg_sizep) { 16727c478bd9Sstevel@tonic-gate RPCLOG(32, "mir_do_rput: fragment size" 16737c478bd9Sstevel@tonic-gate " (%d) > maximum", l); 16747c478bd9Sstevel@tonic-gate RPCLOG(32, " (%u)\n", 
16757c478bd9Sstevel@tonic-gate *mir->mir_max_msg_sizep); 16767c478bd9Sstevel@tonic-gate } 16777c478bd9Sstevel@tonic-gate } 16787c478bd9Sstevel@tonic-gate #endif 16797c478bd9Sstevel@tonic-gate /* 16807c478bd9Sstevel@tonic-gate * At this point we have retrieved the complete record 16817c478bd9Sstevel@tonic-gate * header for this fragment. If the current block is 16827c478bd9Sstevel@tonic-gate * empty, then we need to free it and walk to the next 16837c478bd9Sstevel@tonic-gate * block. 16847c478bd9Sstevel@tonic-gate */ 16857c478bd9Sstevel@tonic-gate if (mp->b_rptr >= wptr) { 16867c478bd9Sstevel@tonic-gate /* 16877c478bd9Sstevel@tonic-gate * If this is not the last fragment or if we 16887c478bd9Sstevel@tonic-gate * have not received all the data for this 16897c478bd9Sstevel@tonic-gate * RPC message, then loop around to the next 16907c478bd9Sstevel@tonic-gate * block. 16917c478bd9Sstevel@tonic-gate */ 16927c478bd9Sstevel@tonic-gate if (!(frag_header & MIR_LASTFRAG) || 16937c478bd9Sstevel@tonic-gate (frag_len - 16947c478bd9Sstevel@tonic-gate (frag_header & ~MIR_LASTFRAG)) || 16957c478bd9Sstevel@tonic-gate !head_mp) 16967c478bd9Sstevel@tonic-gate goto same_mblk; 16977c478bd9Sstevel@tonic-gate 16987c478bd9Sstevel@tonic-gate /* 16997c478bd9Sstevel@tonic-gate * Quick walk to next block in the 17007c478bd9Sstevel@tonic-gate * STREAMS message. 17017c478bd9Sstevel@tonic-gate */ 17027c478bd9Sstevel@tonic-gate freeb(mp); 17037c478bd9Sstevel@tonic-gate continue; 17047c478bd9Sstevel@tonic-gate } 17057c478bd9Sstevel@tonic-gate } 17067c478bd9Sstevel@tonic-gate 17077c478bd9Sstevel@tonic-gate /* 17087c478bd9Sstevel@tonic-gate * We've collected the complete record header. The data 17097c478bd9Sstevel@tonic-gate * in the current block is added to the end of the RPC 17107c478bd9Sstevel@tonic-gate * message. Note that tail_mp is the same as mp after 17117c478bd9Sstevel@tonic-gate * this linkage. 
17127c478bd9Sstevel@tonic-gate */ 17137c478bd9Sstevel@tonic-gate if (!head_mp) 17147c478bd9Sstevel@tonic-gate head_mp = mp; 17157c478bd9Sstevel@tonic-gate else if (tail_mp != mp) { 17167c478bd9Sstevel@tonic-gate ASSERT((tail_mp->b_cont == NULL) || 17177c478bd9Sstevel@tonic-gate (tail_mp->b_cont == mp)); 17187c478bd9Sstevel@tonic-gate tail_mp->b_cont = mp; 17197c478bd9Sstevel@tonic-gate } 17207c478bd9Sstevel@tonic-gate tail_mp = mp; 17217c478bd9Sstevel@tonic-gate 17227c478bd9Sstevel@tonic-gate /* 17237c478bd9Sstevel@tonic-gate * Add the length of this block to the accumulated 17247c478bd9Sstevel@tonic-gate * fragment length. 17257c478bd9Sstevel@tonic-gate */ 17267c478bd9Sstevel@tonic-gate frag_len += len; 17277c478bd9Sstevel@tonic-gate excess = frag_len - (frag_header & ~MIR_LASTFRAG); 17287c478bd9Sstevel@tonic-gate /* 17297c478bd9Sstevel@tonic-gate * If we have not received all the data for this fragment, 17307c478bd9Sstevel@tonic-gate * then walk to the next block. 17317c478bd9Sstevel@tonic-gate */ 17327c478bd9Sstevel@tonic-gate if (excess < 0) 17337c478bd9Sstevel@tonic-gate continue; 17347c478bd9Sstevel@tonic-gate 17357c478bd9Sstevel@tonic-gate /* 17367c478bd9Sstevel@tonic-gate * We've received a complete fragment, so reset frag_len 17377c478bd9Sstevel@tonic-gate * for the next one. 17387c478bd9Sstevel@tonic-gate */ 17397c478bd9Sstevel@tonic-gate frag_len = -(int32_t)sizeof (uint32_t); 17407c478bd9Sstevel@tonic-gate 17417c478bd9Sstevel@tonic-gate /* 17427c478bd9Sstevel@tonic-gate * Update rptr to point to the beginning of the next 17437c478bd9Sstevel@tonic-gate * fragment in this block. If there are no more bytes 17447c478bd9Sstevel@tonic-gate * in the block (excess is 0), then rptr will be equal 17457c478bd9Sstevel@tonic-gate * to wptr. 
17467c478bd9Sstevel@tonic-gate */ 17477c478bd9Sstevel@tonic-gate rptr = wptr - excess; 17487c478bd9Sstevel@tonic-gate 17497c478bd9Sstevel@tonic-gate /* 17507c478bd9Sstevel@tonic-gate * Now we check to see if this fragment is the last one in 17517c478bd9Sstevel@tonic-gate * the RPC message. 17527c478bd9Sstevel@tonic-gate */ 17537c478bd9Sstevel@tonic-gate if (!(frag_header & MIR_LASTFRAG)) { 17547c478bd9Sstevel@tonic-gate /* 17557c478bd9Sstevel@tonic-gate * This isn't the last one, so start processing the 17567c478bd9Sstevel@tonic-gate * next fragment. 17577c478bd9Sstevel@tonic-gate */ 17587c478bd9Sstevel@tonic-gate frag_header = 0; 17597c478bd9Sstevel@tonic-gate 17607c478bd9Sstevel@tonic-gate /* 17617c478bd9Sstevel@tonic-gate * If excess is 0, the next fragment 17627c478bd9Sstevel@tonic-gate * starts at the beginning of the next block -- 17637c478bd9Sstevel@tonic-gate * we "continue" to the end of the while loop and 17647c478bd9Sstevel@tonic-gate * walk to cont_mp. 17657c478bd9Sstevel@tonic-gate */ 17667c478bd9Sstevel@tonic-gate if (excess == 0) 17677c478bd9Sstevel@tonic-gate continue; 17687c478bd9Sstevel@tonic-gate RPCLOG0(32, "mir_do_rput: multi-fragment message with " 17697c478bd9Sstevel@tonic-gate "two or more fragments in one mblk\n"); 17707c478bd9Sstevel@tonic-gate 17717c478bd9Sstevel@tonic-gate /* 17727c478bd9Sstevel@tonic-gate * If excess is non-0, then the next fragment starts 17737c478bd9Sstevel@tonic-gate * in this block. rptr points to the beginning 17747c478bd9Sstevel@tonic-gate * of the next fragment and we "goto same_mblk" 17757c478bd9Sstevel@tonic-gate * to continue processing. 17767c478bd9Sstevel@tonic-gate */ 17777c478bd9Sstevel@tonic-gate goto same_mblk; 17787c478bd9Sstevel@tonic-gate } 17797c478bd9Sstevel@tonic-gate 17807c478bd9Sstevel@tonic-gate /* 17817c478bd9Sstevel@tonic-gate * We've got a complete RPC message. 
Before passing it 17827c478bd9Sstevel@tonic-gate * upstream, check to see if there is extra data in this 17837c478bd9Sstevel@tonic-gate * message block. If so, then we separate the excess 17847c478bd9Sstevel@tonic-gate * from the complete message. The excess data is processed 17857c478bd9Sstevel@tonic-gate * after the current message goes upstream. 17867c478bd9Sstevel@tonic-gate */ 17877c478bd9Sstevel@tonic-gate if (excess > 0) { 17887c478bd9Sstevel@tonic-gate RPCLOG(32, "mir_do_rput: end of record, but excess " 17897c478bd9Sstevel@tonic-gate "data (%d bytes) in this mblk. dupb/copyb " 17907c478bd9Sstevel@tonic-gate "needed\n", excess); 17917c478bd9Sstevel@tonic-gate 17927c478bd9Sstevel@tonic-gate /* Duplicate only the overlapping block. */ 17937c478bd9Sstevel@tonic-gate mp1 = dupb(tail_mp); 17947c478bd9Sstevel@tonic-gate 17957c478bd9Sstevel@tonic-gate /* 17967c478bd9Sstevel@tonic-gate * dupb() might have failed due to ref count wrap around 17977c478bd9Sstevel@tonic-gate * so try a copyb(). 17987c478bd9Sstevel@tonic-gate */ 17997c478bd9Sstevel@tonic-gate if (mp1 == NULL) 18007c478bd9Sstevel@tonic-gate mp1 = copyb(tail_mp); 18017c478bd9Sstevel@tonic-gate 18027c478bd9Sstevel@tonic-gate /* 18037c478bd9Sstevel@tonic-gate * Do not use bufcall() to schedule a "buffer 18047c478bd9Sstevel@tonic-gate * availability event." The reason is that 18057c478bd9Sstevel@tonic-gate * bufcall() has problems. For example, if memory 18067c478bd9Sstevel@tonic-gate * runs out, bufcall() itself will fail since it 18077c478bd9Sstevel@tonic-gate * needs to allocate memory. The most appropriate 18087c478bd9Sstevel@tonic-gate * action right now is to disconnect this connection 18097c478bd9Sstevel@tonic-gate * as the system is under stress. We should try to 18107c478bd9Sstevel@tonic-gate * free up resources. 
18117c478bd9Sstevel@tonic-gate */ 18127c478bd9Sstevel@tonic-gate if (mp1 == NULL) { 18137c478bd9Sstevel@tonic-gate freemsg(head_mp); 18147c478bd9Sstevel@tonic-gate RPCLOG0(1, "mir_do_rput: dupb/copyb failed\n"); 18157c478bd9Sstevel@tonic-gate mir->mir_frag_header = 0; 18167c478bd9Sstevel@tonic-gate mir->mir_frag_len = -(int)sizeof (uint32_t); 18177c478bd9Sstevel@tonic-gate mir->mir_head_mp = NULL; 18187c478bd9Sstevel@tonic-gate mir->mir_tail_mp = NULL; 18197c478bd9Sstevel@tonic-gate 18207c478bd9Sstevel@tonic-gate mir_disconnect(q, mir); 18217c478bd9Sstevel@tonic-gate return; 18227c478bd9Sstevel@tonic-gate } 18237c478bd9Sstevel@tonic-gate 18247c478bd9Sstevel@tonic-gate /* 18257c478bd9Sstevel@tonic-gate * The new message block is linked with the 18267c478bd9Sstevel@tonic-gate * continuation block in cont_mp. We then point 18277c478bd9Sstevel@tonic-gate * cont_mp to the new block so that we will 18287c478bd9Sstevel@tonic-gate * process it next. 18297c478bd9Sstevel@tonic-gate */ 18307c478bd9Sstevel@tonic-gate mp1->b_cont = cont_mp; 18317c478bd9Sstevel@tonic-gate cont_mp = mp1; 18327c478bd9Sstevel@tonic-gate /* 18337c478bd9Sstevel@tonic-gate * Data in the new block begins at the 18347c478bd9Sstevel@tonic-gate * next fragment (rptr). 18357c478bd9Sstevel@tonic-gate */ 18367c478bd9Sstevel@tonic-gate cont_mp->b_rptr += (rptr - tail_mp->b_rptr); 18377c478bd9Sstevel@tonic-gate ASSERT(cont_mp->b_rptr >= cont_mp->b_datap->db_base); 18387c478bd9Sstevel@tonic-gate ASSERT(cont_mp->b_rptr <= cont_mp->b_wptr); 18397c478bd9Sstevel@tonic-gate 18407c478bd9Sstevel@tonic-gate /* Data in the current fragment ends at rptr. 
*/ 18417c478bd9Sstevel@tonic-gate tail_mp->b_wptr = rptr; 18427c478bd9Sstevel@tonic-gate ASSERT(tail_mp->b_wptr <= tail_mp->b_datap->db_lim); 18437c478bd9Sstevel@tonic-gate ASSERT(tail_mp->b_wptr >= tail_mp->b_rptr); 18447c478bd9Sstevel@tonic-gate 18457c478bd9Sstevel@tonic-gate } 18467c478bd9Sstevel@tonic-gate 18477c478bd9Sstevel@tonic-gate /* tail_mp is the last block with data for this RPC message. */ 18487c478bd9Sstevel@tonic-gate tail_mp->b_cont = NULL; 18497c478bd9Sstevel@tonic-gate 18507c478bd9Sstevel@tonic-gate /* Pass the RPC message to the current consumer. */ 18517c478bd9Sstevel@tonic-gate switch (mir->mir_type) { 18527c478bd9Sstevel@tonic-gate case RPC_CLIENT: 18537c478bd9Sstevel@tonic-gate if (clnt_dispatch_notify(head_mp, mir->mir_zoneid)) { 18547c478bd9Sstevel@tonic-gate /* 18557c478bd9Sstevel@tonic-gate * Mark this stream as active. This marker 18567c478bd9Sstevel@tonic-gate * is used in mir_timer(). 18577c478bd9Sstevel@tonic-gate */ 18587c478bd9Sstevel@tonic-gate 18597c478bd9Sstevel@tonic-gate mir->mir_clntreq = 1; 18607c478bd9Sstevel@tonic-gate mir->mir_use_timestamp = lbolt; 18617c478bd9Sstevel@tonic-gate } else 18627c478bd9Sstevel@tonic-gate freemsg(head_mp); 18637c478bd9Sstevel@tonic-gate break; 18647c478bd9Sstevel@tonic-gate 18657c478bd9Sstevel@tonic-gate case RPC_SERVER: 18667c478bd9Sstevel@tonic-gate /* 18677c478bd9Sstevel@tonic-gate * Check for flow control before passing the 18687c478bd9Sstevel@tonic-gate * message to KRPC. 18697c478bd9Sstevel@tonic-gate */ 18707c478bd9Sstevel@tonic-gate 18717c478bd9Sstevel@tonic-gate if (!mir->mir_hold_inbound) { 18727c478bd9Sstevel@tonic-gate if (mir->mir_krpc_cell) { 18737c478bd9Sstevel@tonic-gate /* 18747c478bd9Sstevel@tonic-gate * If the reference count is 0 18757c478bd9Sstevel@tonic-gate * (not including this request), 18767c478bd9Sstevel@tonic-gate * then the stream is transitioning 18777c478bd9Sstevel@tonic-gate * from idle to non-idle. 
In this case, 18787c478bd9Sstevel@tonic-gate * we cancel the idle timer. 18797c478bd9Sstevel@tonic-gate */ 18807c478bd9Sstevel@tonic-gate if (mir->mir_ref_cnt++ == 0) 18817c478bd9Sstevel@tonic-gate stop_timer = B_TRUE; 18827c478bd9Sstevel@tonic-gate if (mir_check_len(q, 18837c478bd9Sstevel@tonic-gate (int32_t)msgdsize(mp), mp)) 18847c478bd9Sstevel@tonic-gate return; 18857c478bd9Sstevel@tonic-gate svc_queuereq(q, head_mp); /* to KRPC */ 18867c478bd9Sstevel@tonic-gate } else { 18877c478bd9Sstevel@tonic-gate /* 18887c478bd9Sstevel@tonic-gate * Count # of times this happens. Should be 18897c478bd9Sstevel@tonic-gate * never, but experience shows otherwise. 18907c478bd9Sstevel@tonic-gate */ 18917c478bd9Sstevel@tonic-gate mir_krpc_cell_null++; 18927c478bd9Sstevel@tonic-gate freemsg(head_mp); 18937c478bd9Sstevel@tonic-gate } 18947c478bd9Sstevel@tonic-gate 18957c478bd9Sstevel@tonic-gate } else { 18967c478bd9Sstevel@tonic-gate /* 18977c478bd9Sstevel@tonic-gate * If the outbound side of the stream is 18987c478bd9Sstevel@tonic-gate * flow controlled, then hold this message 18997c478bd9Sstevel@tonic-gate * until client catches up. mir_hold_inbound 19007c478bd9Sstevel@tonic-gate * is set in mir_wput and cleared in mir_wsrv. 
19017c478bd9Sstevel@tonic-gate */ 19027c478bd9Sstevel@tonic-gate if (srv) 19037c478bd9Sstevel@tonic-gate (void) putbq(q, head_mp); 19047c478bd9Sstevel@tonic-gate else 19057c478bd9Sstevel@tonic-gate (void) putq(q, head_mp); 19067c478bd9Sstevel@tonic-gate mir->mir_inrservice = B_TRUE; 19077c478bd9Sstevel@tonic-gate } 19087c478bd9Sstevel@tonic-gate break; 19097c478bd9Sstevel@tonic-gate default: 19107c478bd9Sstevel@tonic-gate RPCLOG(1, "mir_rput: unknown mir_type %d\n", 19117c478bd9Sstevel@tonic-gate mir->mir_type); 19127c478bd9Sstevel@tonic-gate freemsg(head_mp); 19137c478bd9Sstevel@tonic-gate break; 19147c478bd9Sstevel@tonic-gate } 19157c478bd9Sstevel@tonic-gate 19167c478bd9Sstevel@tonic-gate /* 19177c478bd9Sstevel@tonic-gate * Reset head_mp and frag_header since we're starting on a 19187c478bd9Sstevel@tonic-gate * new RPC fragment and message. 19197c478bd9Sstevel@tonic-gate */ 19207c478bd9Sstevel@tonic-gate head_mp = NULL; 19217c478bd9Sstevel@tonic-gate tail_mp = NULL; 19227c478bd9Sstevel@tonic-gate frag_header = 0; 19237c478bd9Sstevel@tonic-gate } while ((mp = cont_mp) != NULL); 19247c478bd9Sstevel@tonic-gate 19257c478bd9Sstevel@tonic-gate /* 19267c478bd9Sstevel@tonic-gate * Do a sanity check on the message length. If this message is 19277c478bd9Sstevel@tonic-gate * getting excessively large, shut down the connection. 19287c478bd9Sstevel@tonic-gate */ 19297c478bd9Sstevel@tonic-gate if (head_mp != NULL && mir->mir_setup_complete && 19307c478bd9Sstevel@tonic-gate mir_check_len(q, frag_len, head_mp)) 19317c478bd9Sstevel@tonic-gate return; 19327c478bd9Sstevel@tonic-gate 19337c478bd9Sstevel@tonic-gate /* Save our local copies back in the mir structure. 
*/ 19347c478bd9Sstevel@tonic-gate mir->mir_frag_header = frag_header; 19357c478bd9Sstevel@tonic-gate mir->mir_frag_len = frag_len; 19367c478bd9Sstevel@tonic-gate mir->mir_head_mp = head_mp; 19377c478bd9Sstevel@tonic-gate mir->mir_tail_mp = tail_mp; 19387c478bd9Sstevel@tonic-gate 19397c478bd9Sstevel@tonic-gate /* 19407c478bd9Sstevel@tonic-gate * The timer is stopped after the whole message chain is processed. 19417c478bd9Sstevel@tonic-gate * The reason is that stopping the timer releases the mir_mutex 19427c478bd9Sstevel@tonic-gate * lock temporarily. This means that the request can be serviced 19437c478bd9Sstevel@tonic-gate * while we are still processing the message chain. This is not 19447c478bd9Sstevel@tonic-gate * good. So we stop the timer here instead. 19457c478bd9Sstevel@tonic-gate * 19467c478bd9Sstevel@tonic-gate * Note that if the timer fires before we stop it, it will not 19477c478bd9Sstevel@tonic-gate * do any harm as MIR_SVC_QUIESCED() is false and mir_timer() 19487c478bd9Sstevel@tonic-gate * will just return; 19497c478bd9Sstevel@tonic-gate */ 19507c478bd9Sstevel@tonic-gate if (stop_timer) { 19517c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_do_rput stopping idle timer on 0x%p because " 19527c478bd9Sstevel@tonic-gate "ref cnt going to non zero\n", (void *) WR(q)); 19537c478bd9Sstevel@tonic-gate mir_svc_idle_stop(WR(q), mir); 19547c478bd9Sstevel@tonic-gate } 19557c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 19567c478bd9Sstevel@tonic-gate } 19577c478bd9Sstevel@tonic-gate 19587c478bd9Sstevel@tonic-gate static void 19597c478bd9Sstevel@tonic-gate mir_rput(queue_t *q, mblk_t *mp) 19607c478bd9Sstevel@tonic-gate { 19617c478bd9Sstevel@tonic-gate mir_do_rput(q, mp, 0); 19627c478bd9Sstevel@tonic-gate } 19637c478bd9Sstevel@tonic-gate 19647c478bd9Sstevel@tonic-gate static void 19657c478bd9Sstevel@tonic-gate mir_rput_proto(queue_t *q, mblk_t *mp) 19667c478bd9Sstevel@tonic-gate { 19677c478bd9Sstevel@tonic-gate mir_t *mir = (mir_t *)q->q_ptr; 
19687c478bd9Sstevel@tonic-gate uint32_t type; 19697c478bd9Sstevel@tonic-gate uint32_t reason = 0; 19707c478bd9Sstevel@tonic-gate 19717c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex)); 19727c478bd9Sstevel@tonic-gate 19737c478bd9Sstevel@tonic-gate type = ((union T_primitives *)mp->b_rptr)->type; 19747c478bd9Sstevel@tonic-gate switch (mir->mir_type) { 19757c478bd9Sstevel@tonic-gate case RPC_CLIENT: 19767c478bd9Sstevel@tonic-gate switch (type) { 19777c478bd9Sstevel@tonic-gate case T_DISCON_IND: 19787c478bd9Sstevel@tonic-gate reason = 19797c478bd9Sstevel@tonic-gate ((struct T_discon_ind *)(mp->b_rptr))->DISCON_reason; 19807c478bd9Sstevel@tonic-gate /*FALLTHROUGH*/ 19817c478bd9Sstevel@tonic-gate case T_ORDREL_IND: 19827c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 19837c478bd9Sstevel@tonic-gate if (mir->mir_head_mp) { 19847c478bd9Sstevel@tonic-gate freemsg(mir->mir_head_mp); 19857c478bd9Sstevel@tonic-gate mir->mir_head_mp = (mblk_t *)0; 19867c478bd9Sstevel@tonic-gate mir->mir_tail_mp = (mblk_t *)0; 19877c478bd9Sstevel@tonic-gate } 19887c478bd9Sstevel@tonic-gate /* 19897c478bd9Sstevel@tonic-gate * We are disconnecting, but not necessarily 19907c478bd9Sstevel@tonic-gate * closing. By not closing, we will fail to 19917c478bd9Sstevel@tonic-gate * pick up a possibly changed global timeout value, 19927c478bd9Sstevel@tonic-gate * unless we store it now. 19937c478bd9Sstevel@tonic-gate */ 19947c478bd9Sstevel@tonic-gate mir->mir_idle_timeout = clnt_idle_timeout; 19957c478bd9Sstevel@tonic-gate mir_clnt_idle_stop(WR(q), mir); 19967c478bd9Sstevel@tonic-gate 19977c478bd9Sstevel@tonic-gate /* 19987c478bd9Sstevel@tonic-gate * Even though we are unconnected, we still 19997c478bd9Sstevel@tonic-gate * leave the idle timer going on the client. 
The 20007c478bd9Sstevel@tonic-gate * reason for is that if we've disconnected due 20017c478bd9Sstevel@tonic-gate * to a server-side disconnect, reset, or connection 20027c478bd9Sstevel@tonic-gate * timeout, there is a possibility the client may 20037c478bd9Sstevel@tonic-gate * retry the RPC request. This retry needs to done on 20047c478bd9Sstevel@tonic-gate * the same bound address for the server to interpret 20057c478bd9Sstevel@tonic-gate * it as such. However, we don't want 20067c478bd9Sstevel@tonic-gate * to wait forever for that possibility. If the 20077c478bd9Sstevel@tonic-gate * end-point stays unconnected for mir_idle_timeout 20087c478bd9Sstevel@tonic-gate * units of time, then that is a signal to the 20097c478bd9Sstevel@tonic-gate * connection manager to give up waiting for the 20107c478bd9Sstevel@tonic-gate * application (eg. NFS) to send a retry. 20117c478bd9Sstevel@tonic-gate */ 20127c478bd9Sstevel@tonic-gate mir_clnt_idle_start(WR(q), mir); 20137c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 20147c478bd9Sstevel@tonic-gate clnt_dispatch_notifyall(WR(q), type, reason); 20157c478bd9Sstevel@tonic-gate freemsg(mp); 20167c478bd9Sstevel@tonic-gate return; 20177c478bd9Sstevel@tonic-gate case T_ERROR_ACK: 20187c478bd9Sstevel@tonic-gate { 20197c478bd9Sstevel@tonic-gate struct T_error_ack *terror; 20207c478bd9Sstevel@tonic-gate 20217c478bd9Sstevel@tonic-gate terror = (struct T_error_ack *)mp->b_rptr; 20227c478bd9Sstevel@tonic-gate RPCLOG(1, "mir_rput_proto T_ERROR_ACK for queue 0x%p", 20237c478bd9Sstevel@tonic-gate (void *)q); 20247c478bd9Sstevel@tonic-gate RPCLOG(1, " ERROR_prim: %s,", 20257c478bd9Sstevel@tonic-gate rpc_tpiprim2name(terror->ERROR_prim)); 20267c478bd9Sstevel@tonic-gate RPCLOG(1, " TLI_error: %s,", 20277c478bd9Sstevel@tonic-gate rpc_tpierr2name(terror->TLI_error)); 20287c478bd9Sstevel@tonic-gate RPCLOG(1, " UNIX_error: %d\n", terror->UNIX_error); 20297c478bd9Sstevel@tonic-gate if (terror->ERROR_prim == T_DISCON_REQ) { 
20307c478bd9Sstevel@tonic-gate clnt_dispatch_notifyall(WR(q), type, reason); 20317c478bd9Sstevel@tonic-gate freemsg(mp); 20327c478bd9Sstevel@tonic-gate return; 20337c478bd9Sstevel@tonic-gate } else { 20347c478bd9Sstevel@tonic-gate if (clnt_dispatch_notifyconn(WR(q), mp)) 20357c478bd9Sstevel@tonic-gate return; 20367c478bd9Sstevel@tonic-gate } 20377c478bd9Sstevel@tonic-gate break; 20387c478bd9Sstevel@tonic-gate } 20397c478bd9Sstevel@tonic-gate case T_OK_ACK: 20407c478bd9Sstevel@tonic-gate { 20417c478bd9Sstevel@tonic-gate struct T_ok_ack *tok = (struct T_ok_ack *)mp->b_rptr; 20427c478bd9Sstevel@tonic-gate 20437c478bd9Sstevel@tonic-gate if (tok->CORRECT_prim == T_DISCON_REQ) { 20447c478bd9Sstevel@tonic-gate clnt_dispatch_notifyall(WR(q), type, reason); 20457c478bd9Sstevel@tonic-gate freemsg(mp); 20467c478bd9Sstevel@tonic-gate return; 20477c478bd9Sstevel@tonic-gate } else { 20487c478bd9Sstevel@tonic-gate if (clnt_dispatch_notifyconn(WR(q), mp)) 20497c478bd9Sstevel@tonic-gate return; 20507c478bd9Sstevel@tonic-gate } 20517c478bd9Sstevel@tonic-gate break; 20527c478bd9Sstevel@tonic-gate } 20537c478bd9Sstevel@tonic-gate case T_CONN_CON: 20547c478bd9Sstevel@tonic-gate case T_INFO_ACK: 20557c478bd9Sstevel@tonic-gate case T_OPTMGMT_ACK: 20567c478bd9Sstevel@tonic-gate if (clnt_dispatch_notifyconn(WR(q), mp)) 20577c478bd9Sstevel@tonic-gate return; 20587c478bd9Sstevel@tonic-gate break; 20597c478bd9Sstevel@tonic-gate case T_BIND_ACK: 20607c478bd9Sstevel@tonic-gate break; 20617c478bd9Sstevel@tonic-gate default: 20627c478bd9Sstevel@tonic-gate RPCLOG(1, "mir_rput: unexpected message %d " 20637c478bd9Sstevel@tonic-gate "for KRPC client\n", 20647c478bd9Sstevel@tonic-gate ((union T_primitives *)mp->b_rptr)->type); 20657c478bd9Sstevel@tonic-gate break; 20667c478bd9Sstevel@tonic-gate } 20677c478bd9Sstevel@tonic-gate break; 20687c478bd9Sstevel@tonic-gate 20697c478bd9Sstevel@tonic-gate case RPC_SERVER: 20707c478bd9Sstevel@tonic-gate switch (type) { 20717c478bd9Sstevel@tonic-gate case 
T_BIND_ACK: 20727c478bd9Sstevel@tonic-gate { 20737c478bd9Sstevel@tonic-gate struct T_bind_ack *tbind; 20747c478bd9Sstevel@tonic-gate 20757c478bd9Sstevel@tonic-gate /* 20767c478bd9Sstevel@tonic-gate * If this is a listening stream, then shut 20777c478bd9Sstevel@tonic-gate * off the idle timer. 20787c478bd9Sstevel@tonic-gate */ 20797c478bd9Sstevel@tonic-gate tbind = (struct T_bind_ack *)mp->b_rptr; 20807c478bd9Sstevel@tonic-gate if (tbind->CONIND_number > 0) { 20817c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 20827c478bd9Sstevel@tonic-gate mir_svc_idle_stop(WR(q), mir); 20837c478bd9Sstevel@tonic-gate 20847c478bd9Sstevel@tonic-gate /* 20857c478bd9Sstevel@tonic-gate * mark this as a listen endpoint 20867c478bd9Sstevel@tonic-gate * for special handling. 20877c478bd9Sstevel@tonic-gate */ 20887c478bd9Sstevel@tonic-gate 20897c478bd9Sstevel@tonic-gate mir->mir_listen_stream = 1; 20907c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 20917c478bd9Sstevel@tonic-gate } 20927c478bd9Sstevel@tonic-gate break; 20937c478bd9Sstevel@tonic-gate } 20947c478bd9Sstevel@tonic-gate case T_DISCON_IND: 20957c478bd9Sstevel@tonic-gate case T_ORDREL_IND: 20967c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_rput_proto: got %s indication\n", 20977c478bd9Sstevel@tonic-gate type == T_DISCON_IND ? "disconnect" 20987c478bd9Sstevel@tonic-gate : "orderly release"); 20997c478bd9Sstevel@tonic-gate 21007c478bd9Sstevel@tonic-gate /* 21017c478bd9Sstevel@tonic-gate * For listen endpoint just pass 21027c478bd9Sstevel@tonic-gate * on the message. 21037c478bd9Sstevel@tonic-gate */ 21047c478bd9Sstevel@tonic-gate 21057c478bd9Sstevel@tonic-gate if (mir->mir_listen_stream) 21067c478bd9Sstevel@tonic-gate break; 21077c478bd9Sstevel@tonic-gate 21087c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 21097c478bd9Sstevel@tonic-gate 21107c478bd9Sstevel@tonic-gate /* 21117c478bd9Sstevel@tonic-gate * If client wants to break off connection, record 21127c478bd9Sstevel@tonic-gate * that fact. 
21137c478bd9Sstevel@tonic-gate */ 21147c478bd9Sstevel@tonic-gate mir_svc_start_close(WR(q), mir); 21157c478bd9Sstevel@tonic-gate 21167c478bd9Sstevel@tonic-gate /* 21177c478bd9Sstevel@tonic-gate * If we are idle, then send the orderly release 21187c478bd9Sstevel@tonic-gate * or disconnect indication to nfsd. 21197c478bd9Sstevel@tonic-gate */ 21207c478bd9Sstevel@tonic-gate if (MIR_SVC_QUIESCED(mir)) { 21217c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 21227c478bd9Sstevel@tonic-gate break; 21237c478bd9Sstevel@tonic-gate } 21247c478bd9Sstevel@tonic-gate 21257c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_rput_proto: not idle, so " 21267c478bd9Sstevel@tonic-gate "disconnect/ord rel indication not passed " 21277c478bd9Sstevel@tonic-gate "upstream on 0x%p\n", (void *)q); 21287c478bd9Sstevel@tonic-gate 21297c478bd9Sstevel@tonic-gate /* 21307c478bd9Sstevel@tonic-gate * Hold the indication until we get idle 21317c478bd9Sstevel@tonic-gate * If there already is an indication stored, 21327c478bd9Sstevel@tonic-gate * replace it if the new one is a disconnect. The 21337c478bd9Sstevel@tonic-gate * reasoning is that disconnection takes less time 21347c478bd9Sstevel@tonic-gate * to process, and once a client decides to 21357c478bd9Sstevel@tonic-gate * disconnect, we should do that. 
21367c478bd9Sstevel@tonic-gate */ 21377c478bd9Sstevel@tonic-gate if (mir->mir_svc_pend_mp) { 21387c478bd9Sstevel@tonic-gate if (type == T_DISCON_IND) { 21397c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_rput_proto: replacing" 21407c478bd9Sstevel@tonic-gate " held disconnect/ord rel" 21417c478bd9Sstevel@tonic-gate " indication with disconnect on" 21427c478bd9Sstevel@tonic-gate " 0x%p\n", (void *)q); 21437c478bd9Sstevel@tonic-gate 21447c478bd9Sstevel@tonic-gate freemsg(mir->mir_svc_pend_mp); 21457c478bd9Sstevel@tonic-gate mir->mir_svc_pend_mp = mp; 21467c478bd9Sstevel@tonic-gate } else { 21477c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_rput_proto: already " 21487c478bd9Sstevel@tonic-gate "held a disconnect/ord rel " 21497c478bd9Sstevel@tonic-gate "indication. freeing ord rel " 21507c478bd9Sstevel@tonic-gate "ind on 0x%p\n", (void *)q); 21517c478bd9Sstevel@tonic-gate freemsg(mp); 21527c478bd9Sstevel@tonic-gate } 21537c478bd9Sstevel@tonic-gate } else 21547c478bd9Sstevel@tonic-gate mir->mir_svc_pend_mp = mp; 21557c478bd9Sstevel@tonic-gate 21567c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 21577c478bd9Sstevel@tonic-gate return; 21587c478bd9Sstevel@tonic-gate 21597c478bd9Sstevel@tonic-gate default: 21607c478bd9Sstevel@tonic-gate /* nfsd handles server-side non-data messages. */ 21617c478bd9Sstevel@tonic-gate break; 21627c478bd9Sstevel@tonic-gate } 21637c478bd9Sstevel@tonic-gate break; 21647c478bd9Sstevel@tonic-gate 21657c478bd9Sstevel@tonic-gate default: 21667c478bd9Sstevel@tonic-gate break; 21677c478bd9Sstevel@tonic-gate } 21687c478bd9Sstevel@tonic-gate 21697c478bd9Sstevel@tonic-gate putnext(q, mp); 21707c478bd9Sstevel@tonic-gate } 21717c478bd9Sstevel@tonic-gate 21727c478bd9Sstevel@tonic-gate /* 21737c478bd9Sstevel@tonic-gate * The server-side read queues are used to hold inbound messages while 21747c478bd9Sstevel@tonic-gate * outbound flow control is exerted. 
When outbound flow control is 21757c478bd9Sstevel@tonic-gate * relieved, mir_wsrv qenables the read-side queue. Read-side queues 21767c478bd9Sstevel@tonic-gate * are not enabled by STREAMS and are explicitly noenable'ed in mir_open. 21777c478bd9Sstevel@tonic-gate * 21787c478bd9Sstevel@tonic-gate * For the server side, we have two types of messages queued. The first type 21797c478bd9Sstevel@tonic-gate * are messages that are ready to be XDR decoded and and then sent to the 21807c478bd9Sstevel@tonic-gate * RPC program's dispatch routine. The second type are "raw" messages that 21817c478bd9Sstevel@tonic-gate * haven't been processed, i.e. assembled from rpc record fragements into 21827c478bd9Sstevel@tonic-gate * full requests. The only time we will see the second type of message 21837c478bd9Sstevel@tonic-gate * queued is if we have a memory allocation failure while processing a 21847c478bd9Sstevel@tonic-gate * a raw message. The field mir_first_non_processed_mblk will mark the 21857c478bd9Sstevel@tonic-gate * first such raw message. 
So the flow for server side is: 21867c478bd9Sstevel@tonic-gate * 21877c478bd9Sstevel@tonic-gate * - send processed queued messages to kRPC until we run out or find 21887c478bd9Sstevel@tonic-gate * one that needs additional processing because we were short on memory 21897c478bd9Sstevel@tonic-gate * earlier 21907c478bd9Sstevel@tonic-gate * - process a message that was deferred because of lack of 21917c478bd9Sstevel@tonic-gate * memory 21927c478bd9Sstevel@tonic-gate * - continue processing messages until the queue empties or we 21937c478bd9Sstevel@tonic-gate * have to stop because of lack of memory 21947c478bd9Sstevel@tonic-gate * - during each of the above phase, if the queue is empty and 21957c478bd9Sstevel@tonic-gate * there are no pending messages that were passed to the RPC 21967c478bd9Sstevel@tonic-gate * layer, send upstream the pending disconnect/ordrel indication if 21977c478bd9Sstevel@tonic-gate * there is one 21987c478bd9Sstevel@tonic-gate * 21997c478bd9Sstevel@tonic-gate * The read-side queue is also enabled by a bufcall callback if dupmsg 22007c478bd9Sstevel@tonic-gate * fails in mir_rput. 
 */
static void
mir_rsrv(queue_t *q)
{
	mir_t	*mir;
	mblk_t	*mp;
	mblk_t	*cmp = NULL;
	boolean_t stop_timer = B_FALSE;

	mir = (mir_t *)q->q_ptr;
	mutex_enter(&mir->mir_mutex);

	mp = NULL;
	switch (mir->mir_type) {
	case RPC_SERVER:
		/*
		 * No requests are outstanding in KRPC, so it is safe
		 * to stop holding inbound messages.
		 */
		if (mir->mir_ref_cnt == 0)
			mir->mir_hold_inbound = 0;
		if (mir->mir_hold_inbound) {

			ASSERT(cmp == NULL);
			if (q->q_first == NULL) {

				MIR_CLEAR_INRSRV(mir);

				/*
				 * Queue is empty and KRPC is idle:
				 * release any held disconnect/ordrel
				 * indication upstream now.
				 */
				if (MIR_SVC_QUIESCED(mir)) {
					cmp = mir->mir_svc_pend_mp;
					mir->mir_svc_pend_mp = NULL;
				}
			}

			mutex_exit(&mir->mir_mutex);

			if (cmp != NULL) {
				RPCLOG(16, "mir_rsrv: line %d: sending a held "
				    "disconnect/ord rel indication upstream\n",
				    __LINE__);
				putnext(q, cmp);
			}

			return;
		}
		/* Drain the read queue, handing each message to KRPC. */
		while (mp = getq(q)) {	/* assignment intended */
			if (mir->mir_krpc_cell) {
				/*
				 * If we were idle, turn off idle timer since
				 * we aren't idle any more.
				 */
				if (mir->mir_ref_cnt++ == 0)
					stop_timer = B_TRUE;
				/*
				 * NOTE(review): this return path relies on
				 * mir_check_len() tearing down the stream and
				 * dropping mir_mutex on failure -- confirm,
				 * since there is no mutex_exit() here.
				 */
				if (mir_check_len(q,
					(int32_t)msgdsize(mp), mp))
					return;
				svc_queuereq(q, mp);
			} else {
				/*
				 * Count # of times this happens. Should be
				 * never, but experience shows otherwise.
				 */
				mir_krpc_cell_null++;
				freemsg(mp);
			}
		}
		break;
	case RPC_CLIENT:
		break;
	default:
		RPCLOG(1, "mir_rsrv: unexpected mir_type %d\n", mir->mir_type);

		if (q->q_first == NULL)
			MIR_CLEAR_INRSRV(mir);

		mutex_exit(&mir->mir_mutex);

		return;
	}

	/*
	 * The timer is stopped after all the messages are processed.
	 * The reason is that stopping the timer releases the mir_mutex
	 * lock temporarily.  This means that the request can be serviced
	 * while we are still processing the message queue.  This is not
	 * good.  So we stop the timer here instead.
	 */
	if (stop_timer) {
		RPCLOG(16, "mir_rsrv stopping idle timer on 0x%p because ref "
		    "cnt going to non zero\n", (void *)WR(q));
		mir_svc_idle_stop(WR(q), mir);
	}

	if (q->q_first == NULL) {

		MIR_CLEAR_INRSRV(mir);

		/*
		 * Queue drained and KRPC idle: release any held
		 * disconnect/ordrel indication upstream.
		 */
		ASSERT(cmp == NULL);
		if (mir->mir_type == RPC_SERVER && MIR_SVC_QUIESCED(mir)) {
			cmp = mir->mir_svc_pend_mp;
			mir->mir_svc_pend_mp = NULL;
		}

		mutex_exit(&mir->mir_mutex);

		if (cmp != NULL) {
			RPCLOG(16, "mir_rsrv: line %d: sending a held "
			    "disconnect/ord rel indication upstream\n",
			    __LINE__);
			putnext(q, cmp);
		}

		return;
	}
	mutex_exit(&mir->mir_mutex);
}

static	int	mir_svc_policy_fails;

/*
 * Called to send an event code to nfsd/lockd so that it initiates
 *
connection close. 23197c478bd9Sstevel@tonic-gate */ 23207c478bd9Sstevel@tonic-gate static int 23217c478bd9Sstevel@tonic-gate mir_svc_policy_notify(queue_t *q, int event) 23227c478bd9Sstevel@tonic-gate { 23237c478bd9Sstevel@tonic-gate mblk_t *mp; 23247c478bd9Sstevel@tonic-gate #ifdef DEBUG 23257c478bd9Sstevel@tonic-gate mir_t *mir = (mir_t *)q->q_ptr; 23267c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex)); 23277c478bd9Sstevel@tonic-gate #endif 23287c478bd9Sstevel@tonic-gate ASSERT(q->q_flag & QREADR); 23297c478bd9Sstevel@tonic-gate 23307c478bd9Sstevel@tonic-gate /* 23317c478bd9Sstevel@tonic-gate * Create an M_DATA message with the event code and pass it to the 23327c478bd9Sstevel@tonic-gate * Stream head (nfsd or whoever created the stream will consume it). 23337c478bd9Sstevel@tonic-gate */ 23347c478bd9Sstevel@tonic-gate mp = allocb(sizeof (int), BPRI_HI); 23357c478bd9Sstevel@tonic-gate 23367c478bd9Sstevel@tonic-gate if (!mp) { 23377c478bd9Sstevel@tonic-gate 23387c478bd9Sstevel@tonic-gate mir_svc_policy_fails++; 23397c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_svc_policy_notify: could not allocate event " 23407c478bd9Sstevel@tonic-gate "%d\n", event); 23417c478bd9Sstevel@tonic-gate return (ENOMEM); 23427c478bd9Sstevel@tonic-gate } 23437c478bd9Sstevel@tonic-gate 23447c478bd9Sstevel@tonic-gate U32_TO_BE32(event, mp->b_rptr); 23457c478bd9Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (int); 23467c478bd9Sstevel@tonic-gate putnext(q, mp); 23477c478bd9Sstevel@tonic-gate return (0); 23487c478bd9Sstevel@tonic-gate } 23497c478bd9Sstevel@tonic-gate 23507c478bd9Sstevel@tonic-gate /* 23517c478bd9Sstevel@tonic-gate * Server side: start the close phase. We want to get this rpcmod slot in an 23527c478bd9Sstevel@tonic-gate * idle state before mir_close() is called. 
23537c478bd9Sstevel@tonic-gate */ 23547c478bd9Sstevel@tonic-gate static void 23557c478bd9Sstevel@tonic-gate mir_svc_start_close(queue_t *wq, mir_t *mir) 23567c478bd9Sstevel@tonic-gate { 23577c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&mir->mir_mutex)); 23587c478bd9Sstevel@tonic-gate ASSERT((wq->q_flag & QREADR) == 0); 23597c478bd9Sstevel@tonic-gate ASSERT(mir->mir_type == RPC_SERVER); 23607c478bd9Sstevel@tonic-gate 23617c478bd9Sstevel@tonic-gate 23627c478bd9Sstevel@tonic-gate /* 23637c478bd9Sstevel@tonic-gate * Do not accept any more messages. 23647c478bd9Sstevel@tonic-gate */ 23657c478bd9Sstevel@tonic-gate mir->mir_svc_no_more_msgs = 1; 23667c478bd9Sstevel@tonic-gate 23677c478bd9Sstevel@tonic-gate /* 23687c478bd9Sstevel@tonic-gate * Next two statements will make the read service procedure invoke 23697c478bd9Sstevel@tonic-gate * svc_queuereq() on everything stuck in the streams read queue. 23707c478bd9Sstevel@tonic-gate * It's not necessary because enabling the write queue will 23717c478bd9Sstevel@tonic-gate * have the same effect, but why not speed the process along? 23727c478bd9Sstevel@tonic-gate */ 23737c478bd9Sstevel@tonic-gate mir->mir_hold_inbound = 0; 23747c478bd9Sstevel@tonic-gate qenable(RD(wq)); 23757c478bd9Sstevel@tonic-gate 23767c478bd9Sstevel@tonic-gate /* 23777c478bd9Sstevel@tonic-gate * Meanwhile force the write service procedure to send the 23787c478bd9Sstevel@tonic-gate * responses downstream, regardless of flow control. 23797c478bd9Sstevel@tonic-gate */ 23807c478bd9Sstevel@tonic-gate qenable(wq); 23817c478bd9Sstevel@tonic-gate } 23827c478bd9Sstevel@tonic-gate 23837c478bd9Sstevel@tonic-gate /* 23847c478bd9Sstevel@tonic-gate * This routine is called directly by KRPC after a request is completed, 23857c478bd9Sstevel@tonic-gate * whether a reply was sent or the request was dropped. 
 */
static void
mir_svc_release(queue_t *wq, mblk_t *mp)
{
	mir_t	*mir = (mir_t *)wq->q_ptr;
	mblk_t	*cmp = NULL;

	ASSERT((wq->q_flag & QREADR) == 0);

	/* The completed request message (if any) is no longer needed. */
	if (mp)
		freemsg(mp);

	mutex_enter(&mir->mir_mutex);

	/* Drop the reference taken when the request was handed to KRPC. */
	mir->mir_ref_cnt--;
	ASSERT(mir->mir_ref_cnt >= 0);

	/*
	 * Start idle processing if this is the last reference.
	 */
	if (MIR_SVC_QUIESCED(mir)) {

		RPCLOG(16, "mir_svc_release starting idle timer on 0x%p "
		    "because ref cnt is zero\n", (void *) wq);

		/*
		 * Take any held disconnect/ordrel indication now so it
		 * can be sent upstream after mir_mutex is dropped.
		 */
		cmp = mir->mir_svc_pend_mp;
		mir->mir_svc_pend_mp = NULL;
		mir_svc_idle_start(wq, mir);
	}

	mutex_exit(&mir->mir_mutex);

	if (cmp) {
		RPCLOG(16, "mir_svc_release: sending a held "
		    "disconnect/ord rel indication upstream on queue 0x%p\n",
		    (void *)RD(wq));

		putnext(RD(wq), cmp);
	}
}

/*
 * This
routine is called by server-side KRPC when it is ready to 24277c478bd9Sstevel@tonic-gate * handle inbound messages on the stream. 24287c478bd9Sstevel@tonic-gate */ 24297c478bd9Sstevel@tonic-gate static void 24307c478bd9Sstevel@tonic-gate mir_svc_start(queue_t *wq) 24317c478bd9Sstevel@tonic-gate { 24327c478bd9Sstevel@tonic-gate mir_t *mir = (mir_t *)wq->q_ptr; 24337c478bd9Sstevel@tonic-gate 2434*a26eed27Sgt29601 /* 2435*a26eed27Sgt29601 * no longer need to take the mir_mutex because the 2436*a26eed27Sgt29601 * mir_setup_complete field has been moved out of 2437*a26eed27Sgt29601 * the binary field protected by the mir_mutex. 2438*a26eed27Sgt29601 */ 2439*a26eed27Sgt29601 24407c478bd9Sstevel@tonic-gate mir->mir_setup_complete = 1; 24417c478bd9Sstevel@tonic-gate qenable(RD(wq)); 24427c478bd9Sstevel@tonic-gate } 24437c478bd9Sstevel@tonic-gate 24447c478bd9Sstevel@tonic-gate /* 24457c478bd9Sstevel@tonic-gate * client side wrapper for stopping timer with normal idle timeout. 24467c478bd9Sstevel@tonic-gate */ 24477c478bd9Sstevel@tonic-gate static void 24487c478bd9Sstevel@tonic-gate mir_clnt_idle_stop(queue_t *wq, mir_t *mir) 24497c478bd9Sstevel@tonic-gate { 24507c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&mir->mir_mutex)); 24517c478bd9Sstevel@tonic-gate ASSERT((wq->q_flag & QREADR) == 0); 24527c478bd9Sstevel@tonic-gate ASSERT(mir->mir_type == RPC_CLIENT); 24537c478bd9Sstevel@tonic-gate 24547c478bd9Sstevel@tonic-gate mir_timer_stop(mir); 24557c478bd9Sstevel@tonic-gate } 24567c478bd9Sstevel@tonic-gate 24577c478bd9Sstevel@tonic-gate /* 24587c478bd9Sstevel@tonic-gate * client side wrapper for stopping timer with normal idle timeout. 
24597c478bd9Sstevel@tonic-gate */ 24607c478bd9Sstevel@tonic-gate static void 24617c478bd9Sstevel@tonic-gate mir_clnt_idle_start(queue_t *wq, mir_t *mir) 24627c478bd9Sstevel@tonic-gate { 24637c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&mir->mir_mutex)); 24647c478bd9Sstevel@tonic-gate ASSERT((wq->q_flag & QREADR) == 0); 24657c478bd9Sstevel@tonic-gate ASSERT(mir->mir_type == RPC_CLIENT); 24667c478bd9Sstevel@tonic-gate 24677c478bd9Sstevel@tonic-gate mir_timer_start(wq, mir, mir->mir_idle_timeout); 24687c478bd9Sstevel@tonic-gate } 24697c478bd9Sstevel@tonic-gate 24707c478bd9Sstevel@tonic-gate /* 24717c478bd9Sstevel@tonic-gate * client side only. Forces rpcmod to stop sending T_ORDREL_REQs on 24727c478bd9Sstevel@tonic-gate * end-points that aren't connected. 24737c478bd9Sstevel@tonic-gate */ 24747c478bd9Sstevel@tonic-gate static void 24757c478bd9Sstevel@tonic-gate mir_clnt_idle_do_stop(queue_t *wq) 24767c478bd9Sstevel@tonic-gate { 24777c478bd9Sstevel@tonic-gate mir_t *mir = (mir_t *)wq->q_ptr; 24787c478bd9Sstevel@tonic-gate 24797c478bd9Sstevel@tonic-gate RPCLOG(1, "mir_clnt_idle_do_stop: wq 0x%p\n", (void *)wq); 24807c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex)); 24817c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 24827c478bd9Sstevel@tonic-gate mir_clnt_idle_stop(wq, mir); 24837c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 24847c478bd9Sstevel@tonic-gate } 24857c478bd9Sstevel@tonic-gate 24867c478bd9Sstevel@tonic-gate /* 24877c478bd9Sstevel@tonic-gate * Timer handler. It handles idle timeout and memory shortage problem. 
 */
static void
mir_timer(void *arg)
{
	queue_t *wq = (queue_t *)arg;
	mir_t	*mir = (mir_t *)wq->q_ptr;
	boolean_t notify;

	mutex_enter(&mir->mir_mutex);

	/*
	 * mir_timer_call is set only when either mir_timer_[start|stop]
	 * is progressing.  And mir_timer() can only be run while they
	 * are progressing if the timer is being stopped.  So just
	 * return.
	 */
	if (mir->mir_timer_call) {
		mutex_exit(&mir->mir_mutex);
		return;
	}
	/* This callback is running, so no timeout is outstanding any more. */
	mir->mir_timer_id = 0;

	switch (mir->mir_type) {
	case RPC_CLIENT:

		/*
		 * For clients, the timer fires at clnt_idle_timeout
		 * intervals.  If the activity marker (mir_clntreq) is
		 * zero, then the stream has been idle since the last
		 * timer event and we notify KRPC.  If mir_clntreq is
		 * non-zero, then the stream is active and we just
		 * restart the timer for another interval.  mir_clntreq
		 * is set to 1 in mir_wput for every request passed
		 * downstream.
		 *
		 * If this was a memory shortage timer reset the idle
		 * timeout regardless; the mir_clntreq will not be a
		 * valid indicator.
		 *
		 * The timer is initially started in mir_wput during
		 * RPC_CLIENT ioctl processing.
		 *
		 * The timer interval can be changed for individual
		 * streams with the ND variable "mir_idle_timeout".
		 */
		if (mir->mir_clntreq > 0 && mir->mir_use_timestamp +
		    MSEC_TO_TICK(mir->mir_idle_timeout) - lbolt >= 0) {
			clock_t tout;

			/*
			 * Stream was recently active: re-arm for the
			 * remainder of the idle interval (measured from
			 * the last use timestamp, converted ticks->msec).
			 */
			tout = mir->mir_idle_timeout -
			    TICK_TO_MSEC(lbolt - mir->mir_use_timestamp);
			if (tout < 0)
				tout = 1000;
#if 0
printf("mir_timer[%d < %d + %d]: reset client timer to %d (ms)\n",
TICK_TO_MSEC(lbolt), TICK_TO_MSEC(mir->mir_use_timestamp),
mir->mir_idle_timeout, tout);
#endif
			mir->mir_clntreq = 0;
			mir_timer_start(wq, mir, tout);
			mutex_exit(&mir->mir_mutex);
			return;
		}
#if 0
printf("mir_timer[%d]: doing client timeout\n", lbolt / hz);
#endif
		/*
		 * We are disconnecting, but not necessarily
		 * closing. By not closing, we will fail to
		 * pick up a possibly changed global timeout value,
		 * unless we store it now.
		 */
		mir->mir_idle_timeout = clnt_idle_timeout;
		mir_clnt_idle_start(wq, mir);

		mutex_exit(&mir->mir_mutex);
		/*
		 * We pass T_ORDREL_REQ as an integer value
		 * to KRPC as the indication that the stream
		 * is idle.  This is not a T_ORDREL_REQ message,
		 * it is just a convenient value since we call
		 * the same KRPC routine for T_ORDREL_INDs and
		 * T_DISCON_INDs.
		 *
		 * Note: called after dropping mir_mutex, since KRPC
		 * may call back into this module.
		 */
		clnt_dispatch_notifyall(wq, T_ORDREL_REQ, 0);
		return;

	case RPC_SERVER:

		/*
		 * For servers, the timer is only running when the stream
		 * is really idle or memory is short.  The timer is started
		 * by mir_wput when mir_type is set to RPC_SERVER and
		 * by mir_svc_idle_start whenever the stream goes idle
		 * (mir_ref_cnt == 0).  The timer is cancelled in
		 * mir_rput whenever a new inbound request is passed to KRPC
		 * and the stream was previously idle.
		 *
		 * The timer interval can be changed for individual
		 * streams with the ND variable "mir_idle_timeout".
		 *
		 * If the stream is not idle do nothing.
		 */
		if (!MIR_SVC_QUIESCED(mir)) {
			mutex_exit(&mir->mir_mutex);
			return;
		}

		/* Snapshot under the lock; notify outside the lock below. */
		notify = !mir->mir_inrservice;
		mutex_exit(&mir->mir_mutex);

		/*
		 * If there is no packet queued up in read queue, the stream
		 * is really idle so notify nfsd to close it.
		 */
		if (notify) {
			RPCLOG(16, "mir_timer: telling stream head listener "
			    "to close stream (0x%p)\n", (void *) RD(wq));
			(void) mir_svc_policy_notify(RD(wq), 1);
		}
		return;
	default:
		RPCLOG(1, "mir_timer: unexpected mir_type %d\n",
		    mir->mir_type);
		mutex_exit(&mir->mir_mutex);
		return;
	}
}

/*
 * Called by the RPC package to send either a call or a return, or a
 * transport connection request.  Adds the record marking header.
 */
static void
mir_wput(queue_t *q, mblk_t *mp)
{
	uint_t	frag_header;
	mir_t	*mir = (mir_t *)q->q_ptr;
	uchar_t	*rptr = mp->b_rptr;

	if (!mir) {
		freemsg(mp);
		return;
	}

	/* Only M_DATA gets the record mark; everything else is TPI/ioctl. */
	if (mp->b_datap->db_type != M_DATA) {
		mir_wput_other(q, mp);
		return;
	}

	if (mir->mir_ordrel_pending == 1) {
		freemsg(mp);
		RPCLOG(16, "mir_wput wq 0x%p: got data after T_ORDREL_REQ\n",
		    (void *)q);
		return;
	}

	/* Record mark: total message length with the last-fragment bit set. */
	frag_header = (uint_t)DLEN(mp);
	frag_header |= MIR_LASTFRAG;

	/* Stick in the 4 byte record marking header. */
	if ((rptr - mp->b_datap->db_base) < sizeof (uint32_t) ||
	    !IS_P2ALIGNED(mp->b_rptr, sizeof (uint32_t))) {
		/*
		 * Since we know that M_DATA messages are created exclusively
		 * by KRPC, we expect that KRPC will leave room for our header
		 * and 4 byte align which is normal for XDR.
		 * If KRPC (or someone else) does not cooperate, then we
		 * just throw away the message.
		 */
		RPCLOG(1, "mir_wput: KRPC did not leave space for record "
		    "fragment header (%d bytes left)\n",
		    (int)(rptr - mp->b_datap->db_base));
		freemsg(mp);
		return;
	}
	rptr -= sizeof (uint32_t);
	*(uint32_t *)rptr = htonl(frag_header);
	mp->b_rptr = rptr;

	mutex_enter(&mir->mir_mutex);
	if (mir->mir_type == RPC_CLIENT) {
		/*
		 * For the client, set mir_clntreq to indicate that the
		 * connection is active.  mir_timer() reads both fields to
		 * decide whether to re-arm the idle timer.
		 */
		mir->mir_clntreq = 1;
		mir->mir_use_timestamp = lbolt;
	}

	/*
	 * If we haven't already queued some data and the downstream module
	 * can accept more data, send it on, otherwise we queue the message
	 * and take other actions depending on mir_type.
	 */
	if (!mir->mir_inwservice && MIR_WCANPUTNEXT(mir, q)) {
		mutex_exit(&mir->mir_mutex);

		/*
		 * Now we pass the RPC message downstream.
		 */
		putnext(q, mp);
		return;
	}

	switch (mir->mir_type) {
	case RPC_CLIENT:
		/*
		 * Check for a previous duplicate request on the
		 * queue.  If there is one, then we throw away
		 * the current message and let the previous one
		 * go through.  If we can't find a duplicate, then
		 * send this one.  This tap dance is an effort
		 * to reduce traffic and processing requirements
		 * under load conditions.
		 */
		if (mir_clnt_dup_request(q, mp)) {
			mutex_exit(&mir->mir_mutex);
			freemsg(mp);
			return;
		}
		break;
	case RPC_SERVER:
		/*
		 * Set mir_hold_inbound so that new inbound RPC
		 * messages will be held until the client catches
		 * up on the earlier replies.  This flag is cleared
		 * in mir_wsrv after flow control is relieved;
		 * the read-side queue is also enabled at that time.
		 */
		mir->mir_hold_inbound = 1;
		break;
	default:
		RPCLOG(1, "mir_wput: unexpected mir_type %d\n", mir->mir_type);
		break;
	}
	/* Queue for mir_wsrv() to send when flow control is relieved. */
	mir->mir_inwservice = 1;
	(void) putq(q, mp);
	mutex_exit(&mir->mir_mutex);
}

/*
 * Write-side handling for everything other than M_DATA: the RPC_CLIENT /
 * RPC_SERVER setup ioctls, M_PROTO TPI primitives (T_DATA_REQ,
 * T_ORDREL_REQ, T_CONN_REQ, ...), and flush / priority messages.
 * Entered without mir_mutex held (asserted).
 */
static void
mir_wput_other(queue_t *q, mblk_t *mp)
{
	mir_t	*mir = (mir_t *)q->q_ptr;
	struct iocblk	*iocp;
	uchar_t	*rptr = mp->b_rptr;
	bool_t	flush_in_svc = FALSE;

	ASSERT(MUTEX_NOT_HELD(&mir->mir_mutex));
	switch (mp->b_datap->db_type) {
	case M_IOCTL:
		iocp = (struct iocblk *)rptr;
		switch (iocp->ioc_cmd) {
		case RPC_CLIENT:
			mutex_enter(&mir->mir_mutex);
			if (mir->mir_type != 0 &&
			    mir->mir_type != iocp->ioc_cmd) {
			/*
			 * Shared error exit: the stream has already been
			 * set up as the other type.  Also the target of a
			 * goto from the RPC_SERVER case below.
			 */
ioc_eperm:
				mutex_exit(&mir->mir_mutex);
				iocp->ioc_error = EPERM;
				iocp->ioc_count = 0;
				mp->b_datap->db_type = M_IOCACK;
				qreply(q, mp);
				return;
			}

			mir->mir_type = iocp->ioc_cmd;

			/*
			 * Clear mir_hold_inbound which was set to 1 by
			 * mir_open.  This flag is not used on client
			 * streams.
			 */
			mir->mir_hold_inbound = 0;
			mir->mir_max_msg_sizep = &clnt_max_msg_size;

			/*
			 * Start the idle timer.  See mir_timer() for more
			 * information on how client timers work.
			 */
			mir->mir_idle_timeout = clnt_idle_timeout;
			mir_clnt_idle_start(q, mir);
			mutex_exit(&mir->mir_mutex);

			mp->b_datap->db_type = M_IOCACK;
			qreply(q, mp);
			return;
		case RPC_SERVER:
			mutex_enter(&mir->mir_mutex);
			if (mir->mir_type != 0 &&
			    mir->mir_type != iocp->ioc_cmd)
				goto ioc_eperm;

			/*
			 * We don't clear mir_hold_inbound here because
			 * mir_hold_inbound is used in the flow control
			 * model. If we cleared it here, then we'd commit
			 * a small violation to the model where the transport
			 * might immediately block downstream flow.
			 */

			mir->mir_type = iocp->ioc_cmd;
			mir->mir_max_msg_sizep = &svc_max_msg_size;

			/*
			 * Start the idle timer.  See mir_timer() for more
			 * information on how server timers work.
			 *
			 * Note that it is important to start the idle timer
			 * here so that connections time out even if we
			 * never receive any data on them.
			 */
			mir->mir_idle_timeout = svc_idle_timeout;
			RPCLOG(16, "mir_wput_other starting idle timer on 0x%p "
			    "because we got RPC_SERVER ioctl\n", (void *)q);
			mir_svc_idle_start(q, mir);
			mutex_exit(&mir->mir_mutex);

			mp->b_datap->db_type = M_IOCACK;
			qreply(q, mp);
			return;
		default:
			break;
		}
		break;

	case M_PROTO:
		if (mir->mir_type == RPC_CLIENT) {
			/*
			 * We are likely being called from the context of a
			 * service procedure. So we need to enqueue. However
			 * enqueing may put our message behind data messages.
			 * So flush the data first.
			 */
			flush_in_svc = TRUE;
		}
		/* Primitive must be at least 4 bytes and aligned to read. */
		if ((mp->b_wptr - rptr) < sizeof (uint32_t) ||
		    !IS_P2ALIGNED(rptr, sizeof (uint32_t)))
			break;

		switch (((union T_primitives *)rptr)->type) {
		case T_DATA_REQ:
			/* Don't pass T_DATA_REQ messages downstream. */
			freemsg(mp);
			return;
		case T_ORDREL_REQ:
			RPCLOG(8, "mir_wput_other wq 0x%p: got T_ORDREL_REQ\n",
			    (void *)q);
			mutex_enter(&mir->mir_mutex);
			if (mir->mir_type != RPC_SERVER) {
				/*
				 * We are likely being called from
				 * clnt_dispatch_notifyall(). Sending
				 * a T_ORDREL_REQ will result in some kind
				 * of _IND message being sent, which will
				 * be another call to
				 * clnt_dispatch_notifyall(). To keep the stack
				 * lean, queue this message.
				 */
				mir->mir_inwservice = 1;
				(void) putq(q, mp);
				mutex_exit(&mir->mir_mutex);
				return;
			}

			/*
			 * Mark the structure such that we don't accept any
			 * more requests from client. We could defer this
			 * until we actually send the orderly release
			 * request downstream, but all that does is delay
			 * the closing of this stream.
			 */
			RPCLOG(16, "mir_wput_other wq 0x%p: got T_ORDREL_REQ "
			    " so calling mir_svc_start_close\n", (void *)q);

			mir_svc_start_close(q, mir);

			/*
			 * If we have sent down a T_ORDREL_REQ, don't send
			 * any more.
			 */
			if (mir->mir_ordrel_pending) {
				freemsg(mp);
				mutex_exit(&mir->mir_mutex);
				return;
			}

			/*
			 * If the stream is not idle, then we hold the
			 * orderly release until it becomes idle.  This
			 * ensures that KRPC will be able to reply to
			 * all requests that we have passed to it.
			 *
			 * We also queue the request if there is data already
			 * queued, because we cannot allow the T_ORDREL_REQ
			 * to go before data. When we had a separate reply
			 * count, this was not a problem, because the
			 * reply count was reconciled when mir_wsrv()
			 * completed.
			 */
			if (!MIR_SVC_QUIESCED(mir) ||
			    mir->mir_inwservice == 1) {
				mir->mir_inwservice = 1;
				(void) putq(q, mp);

				RPCLOG(16, "mir_wput_other: queuing "
				    "T_ORDREL_REQ on 0x%p\n", (void *)q);

				mutex_exit(&mir->mir_mutex);
				return;
			}

			/*
			 * Mark the structure so that we know we sent
			 * an orderly release request, and reset the idle timer.
			 */
			mir->mir_ordrel_pending = 1;

			RPCLOG(16, "mir_wput_other: calling mir_svc_idle_start"
			    " on 0x%p because we got T_ORDREL_REQ\n",
			    (void *)q);

			mir_svc_idle_start(q, mir);
			mutex_exit(&mir->mir_mutex);

			/*
			 * When we break, we will putnext the T_ORDREL_REQ.
			 */
			break;

		case T_CONN_REQ:
			mutex_enter(&mir->mir_mutex);
			/* Drop any partially assembled inbound record. */
			if (mir->mir_head_mp != NULL) {
				freemsg(mir->mir_head_mp);
				mir->mir_head_mp = NULL;
				mir->mir_tail_mp = NULL;
			}
			mir->mir_frag_len = -(int32_t)sizeof (uint32_t);
			/*
			 * Restart timer in case mir_clnt_idle_do_stop() was
			 * called.
			 */
			mir->mir_idle_timeout = clnt_idle_timeout;
			mir_clnt_idle_stop(q, mir);
			mir_clnt_idle_start(q, mir);
			mutex_exit(&mir->mir_mutex);
			break;

		default:
			/*
			 * T_DISCON_REQ is one of the interesting default
			 * cases here. Ideally, an M_FLUSH is done before
			 * T_DISCON_REQ is done. However, that is somewhat
			 * cumbersome for clnt_cots.c to do. So we queue
			 * T_DISCON_REQ, and let the service procedure
			 * flush all M_DATA.
			 */
			break;
		}
		/* fallthru */;
	default:
		if (mp->b_datap->db_type >= QPCTL) {
			if (mp->b_datap->db_type == M_FLUSH) {
				if (mir->mir_type == RPC_CLIENT &&
				    *mp->b_rptr & FLUSHW) {
					RPCLOG(32, "mir_wput_other: flushing "
					    "wq 0x%p\n", (void *)q);
					if (*mp->b_rptr & FLUSHBAND) {
						flushband(q, *(mp->b_rptr + 1),
						    FLUSHDATA);
					} else {
						flushq(q, FLUSHDATA);
					}
				} else {
					RPCLOG(32, "mir_wput_other: ignoring "
					    "M_FLUSH on wq 0x%p\n", (void *)q);
				}
			}
			/* Priority messages bypass queueing: putnext below. */
			break;
		}

		mutex_enter(&mir->mir_mutex);
		if (mir->mir_inwservice == 0 && MIR_WCANPUTNEXT(mir, q)) {
			mutex_exit(&mir->mir_mutex);
			break;
		}
		/*
		 * Queue the message for mir_wsrv(); mir_inwflushdata tells
		 * the service routine to discard queued M_DATA first.
		 */
		mir->mir_inwservice = 1;
		mir->mir_inwflushdata = flush_in_svc;
		(void) putq(q, mp);
		mutex_exit(&mir->mir_mutex);
		qenable(q);

		return;
	}
	putnext(q, mp);
}
29867c478bd9Sstevel@tonic-gate static void 29877c478bd9Sstevel@tonic-gate mir_wsrv(queue_t *q) 29887c478bd9Sstevel@tonic-gate { 29897c478bd9Sstevel@tonic-gate mblk_t *mp; 29907c478bd9Sstevel@tonic-gate mir_t *mir; 29917c478bd9Sstevel@tonic-gate bool_t flushdata; 29927c478bd9Sstevel@tonic-gate 29937c478bd9Sstevel@tonic-gate mir = (mir_t *)q->q_ptr; 29947c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 29957c478bd9Sstevel@tonic-gate 29967c478bd9Sstevel@tonic-gate flushdata = mir->mir_inwflushdata; 29977c478bd9Sstevel@tonic-gate mir->mir_inwflushdata = 0; 29987c478bd9Sstevel@tonic-gate 29997c478bd9Sstevel@tonic-gate while (mp = getq(q)) { 30007c478bd9Sstevel@tonic-gate if (mp->b_datap->db_type == M_DATA) { 30017c478bd9Sstevel@tonic-gate /* 30027c478bd9Sstevel@tonic-gate * Do not send any more data if we have sent 30037c478bd9Sstevel@tonic-gate * a T_ORDREL_REQ. 30047c478bd9Sstevel@tonic-gate */ 30057c478bd9Sstevel@tonic-gate if (flushdata || mir->mir_ordrel_pending == 1) { 30067c478bd9Sstevel@tonic-gate freemsg(mp); 30077c478bd9Sstevel@tonic-gate continue; 30087c478bd9Sstevel@tonic-gate } 30097c478bd9Sstevel@tonic-gate 30107c478bd9Sstevel@tonic-gate /* 30117c478bd9Sstevel@tonic-gate * Make sure that the stream can really handle more 30127c478bd9Sstevel@tonic-gate * data. 30137c478bd9Sstevel@tonic-gate */ 30147c478bd9Sstevel@tonic-gate if (!MIR_WCANPUTNEXT(mir, q)) { 30157c478bd9Sstevel@tonic-gate (void) putbq(q, mp); 30167c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 30177c478bd9Sstevel@tonic-gate return; 30187c478bd9Sstevel@tonic-gate } 30197c478bd9Sstevel@tonic-gate 30207c478bd9Sstevel@tonic-gate /* 30217c478bd9Sstevel@tonic-gate * Now we pass the RPC message downstream. 
30227c478bd9Sstevel@tonic-gate */ 30237c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 30247c478bd9Sstevel@tonic-gate putnext(q, mp); 30257c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 30267c478bd9Sstevel@tonic-gate continue; 30277c478bd9Sstevel@tonic-gate } 30287c478bd9Sstevel@tonic-gate 30297c478bd9Sstevel@tonic-gate /* 30307c478bd9Sstevel@tonic-gate * This is not an RPC message, pass it downstream 30317c478bd9Sstevel@tonic-gate * (ignoring flow control) if the server side is not sending a 30327c478bd9Sstevel@tonic-gate * T_ORDREL_REQ downstream. 30337c478bd9Sstevel@tonic-gate */ 30347c478bd9Sstevel@tonic-gate if (mir->mir_type != RPC_SERVER || 30357c478bd9Sstevel@tonic-gate ((union T_primitives *)mp->b_rptr)->type != 30367c478bd9Sstevel@tonic-gate T_ORDREL_REQ) { 30377c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 30387c478bd9Sstevel@tonic-gate putnext(q, mp); 30397c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 30407c478bd9Sstevel@tonic-gate continue; 30417c478bd9Sstevel@tonic-gate } 30427c478bd9Sstevel@tonic-gate 30437c478bd9Sstevel@tonic-gate if (mir->mir_ordrel_pending == 1) { 30447c478bd9Sstevel@tonic-gate /* 30457c478bd9Sstevel@tonic-gate * Don't send two T_ORDRELs 30467c478bd9Sstevel@tonic-gate */ 30477c478bd9Sstevel@tonic-gate freemsg(mp); 30487c478bd9Sstevel@tonic-gate continue; 30497c478bd9Sstevel@tonic-gate } 30507c478bd9Sstevel@tonic-gate 30517c478bd9Sstevel@tonic-gate /* 30527c478bd9Sstevel@tonic-gate * Mark the structure so that we know we sent an orderly 30537c478bd9Sstevel@tonic-gate * release request. We will check to see slot is idle at the 30547c478bd9Sstevel@tonic-gate * end of this routine, and if so, reset the idle timer to 30557c478bd9Sstevel@tonic-gate * handle orderly release timeouts. 
30567c478bd9Sstevel@tonic-gate */ 30577c478bd9Sstevel@tonic-gate mir->mir_ordrel_pending = 1; 30587c478bd9Sstevel@tonic-gate RPCLOG(16, "mir_wsrv: sending ordrel req on q 0x%p\n", 30597c478bd9Sstevel@tonic-gate (void *)q); 30607c478bd9Sstevel@tonic-gate /* 30617c478bd9Sstevel@tonic-gate * Send the orderly release downstream. If there are other 30627c478bd9Sstevel@tonic-gate * pending replies we won't be able to send them. However, 30637c478bd9Sstevel@tonic-gate * the only reason we should send the orderly release is if 30647c478bd9Sstevel@tonic-gate * we were idle, or if an unusual event occurred. 30657c478bd9Sstevel@tonic-gate */ 30667c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 30677c478bd9Sstevel@tonic-gate putnext(q, mp); 30687c478bd9Sstevel@tonic-gate mutex_enter(&mir->mir_mutex); 30697c478bd9Sstevel@tonic-gate } 30707c478bd9Sstevel@tonic-gate 30717c478bd9Sstevel@tonic-gate if (q->q_first == NULL) 30727c478bd9Sstevel@tonic-gate /* 30737c478bd9Sstevel@tonic-gate * If we call mir_svc_idle_start() below, then 30747c478bd9Sstevel@tonic-gate * clearing mir_inwservice here will also result in 30757c478bd9Sstevel@tonic-gate * any thread waiting in mir_close() to be signaled. 30767c478bd9Sstevel@tonic-gate */ 30777c478bd9Sstevel@tonic-gate mir->mir_inwservice = 0; 30787c478bd9Sstevel@tonic-gate 30797c478bd9Sstevel@tonic-gate if (mir->mir_type != RPC_SERVER) { 30807c478bd9Sstevel@tonic-gate mutex_exit(&mir->mir_mutex); 30817c478bd9Sstevel@tonic-gate return; 30827c478bd9Sstevel@tonic-gate } 30837c478bd9Sstevel@tonic-gate 30847c478bd9Sstevel@tonic-gate /* 30857c478bd9Sstevel@tonic-gate * If idle we call mir_svc_idle_start to start the timer (or wakeup 30867c478bd9Sstevel@tonic-gate * a close). Also make sure not to start the idle timer on the 30877c478bd9Sstevel@tonic-gate * listener stream. This can cause nfsd to send an orderly release 30887c478bd9Sstevel@tonic-gate * command on the listener stream. 
 */
	if (MIR_SVC_QUIESCED(mir) && !(mir->mir_listen_stream)) {
		RPCLOG(16, "mir_wsrv: calling mir_svc_idle_start on 0x%p "
		    "because mir slot is idle\n", (void *)q);
		mir_svc_idle_start(q, mir);
	}

	/*
	 * If outbound flow control has been relieved, then allow new
	 * inbound requests to be processed.
	 */
	if (mir->mir_hold_inbound) {
		mir->mir_hold_inbound = 0;
		qenable(RD(q));
	}
	mutex_exit(&mir->mir_mutex);
}

/*
 * Tear down the transport connection for this stream, according to the
 * endpoint type recorded in mir->mir_type.
 *
 * Lock contract: called with mir->mir_mutex held; every path through the
 * switch releases the mutex before calling out of this module (the
 * notification routines below must not be invoked with mir_mutex held).
 *
 * RPC_CLIENT: restart the client idle timer with the current global
 * timeout (see comment below) and wake all callers waiting on this
 * transport with a T_DISCON_REQ sanity-failure indication.
 *
 * RPC_SERVER: mark the stream so no further messages are accepted, stop
 * the server idle timer, and ask the stream head listener to disconnect
 * the stream via mir_svc_policy_notify().
 */
static void
mir_disconnect(queue_t *q, mir_t *mir)
{
	ASSERT(MUTEX_HELD(&mir->mir_mutex));

	switch (mir->mir_type) {
	case RPC_CLIENT:
		/*
		 * We are disconnecting, but not necessarily
		 * closing. By not closing, we will fail to
		 * pick up a possibly changed global timeout value,
		 * unless we store it now.
		 */
		mir->mir_idle_timeout = clnt_idle_timeout;
		mir_clnt_idle_start(WR(q), mir);
		mutex_exit(&mir->mir_mutex);

		/*
		 * T_DISCON_REQ is passed to KRPC as an integer value
		 * (this is not a TPI message). It is used as a
		 * convenient value to indicate a sanity check
		 * failure -- the same KRPC routine is also called
		 * for T_DISCON_INDs and T_ORDREL_INDs.
		 */
		clnt_dispatch_notifyall(WR(q), T_DISCON_REQ, 0);
		break;

	case RPC_SERVER:
		/* Refuse any further inbound RPC messages on this stream. */
		mir->mir_svc_no_more_msgs = 1;
		mir_svc_idle_stop(WR(q), mir);
		mutex_exit(&mir->mir_mutex);
		RPCLOG(16, "mir_disconnect: telling "
		    "stream head listener to disconnect stream "
		    "(0x%p)\n", (void *) q);
		/*
		 * NOTE(review): the event value 2 presumably selects the
		 * "disconnect" action in mir_svc_policy_notify() -- confirm
		 * against that routine's event definitions.
		 */
		(void) mir_svc_policy_notify(q, 2);
		break;

	default:
		/* Unknown endpoint type: nothing to notify, just unlock. */
		mutex_exit(&mir->mir_mutex);
		break;
	}
}

/*
 * do a sanity check on the length of the fragment.
 * returns 1 if bad else 0.
31537c478bd9Sstevel@tonic-gate */ 31547c478bd9Sstevel@tonic-gate static int 31557c478bd9Sstevel@tonic-gate mir_check_len(queue_t *q, int32_t frag_len, 31567c478bd9Sstevel@tonic-gate mblk_t *head_mp) 31577c478bd9Sstevel@tonic-gate { 31587c478bd9Sstevel@tonic-gate mir_t *mir; 31597c478bd9Sstevel@tonic-gate 31607c478bd9Sstevel@tonic-gate mir = (mir_t *)q->q_ptr; 31617c478bd9Sstevel@tonic-gate 31627c478bd9Sstevel@tonic-gate /* 31637c478bd9Sstevel@tonic-gate * Do a sanity check on the message length. If this message is 31647c478bd9Sstevel@tonic-gate * getting excessively large, shut down the connection. 31657c478bd9Sstevel@tonic-gate */ 31667c478bd9Sstevel@tonic-gate 31677c478bd9Sstevel@tonic-gate if ((frag_len <= 0) || (mir->mir_max_msg_sizep == NULL) || 31687c478bd9Sstevel@tonic-gate (frag_len <= *mir->mir_max_msg_sizep)) { 31697c478bd9Sstevel@tonic-gate return (0); 31707c478bd9Sstevel@tonic-gate } 31717c478bd9Sstevel@tonic-gate 31727c478bd9Sstevel@tonic-gate freemsg(head_mp); 31737c478bd9Sstevel@tonic-gate mir->mir_head_mp = (mblk_t *)0; 31747c478bd9Sstevel@tonic-gate mir->mir_frag_len = -(int)sizeof (uint32_t); 31757c478bd9Sstevel@tonic-gate if (mir->mir_type != RPC_SERVER || mir->mir_setup_complete) { 31767c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, 31777c478bd9Sstevel@tonic-gate "KRPC: record fragment from %s of size(%d) exceeds " 31787c478bd9Sstevel@tonic-gate "maximum (%u). Disconnecting", 31797c478bd9Sstevel@tonic-gate (mir->mir_type == RPC_CLIENT) ? "server" : 31807c478bd9Sstevel@tonic-gate (mir->mir_type == RPC_SERVER) ? "client" : 31817c478bd9Sstevel@tonic-gate "test tool", 31827c478bd9Sstevel@tonic-gate frag_len, *mir->mir_max_msg_sizep); 31837c478bd9Sstevel@tonic-gate } 31847c478bd9Sstevel@tonic-gate 31857c478bd9Sstevel@tonic-gate mir_disconnect(q, mir); 31867c478bd9Sstevel@tonic-gate return (1); 31877c478bd9Sstevel@tonic-gate } 3188