/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/kmem.h>
#include <fs/fs_subr.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/port_impl.h>

/* local functions */
static int port_open(struct vnode **, int, cred_t *, caller_context_t *);
static int port_close(struct vnode *, int, int, offset_t, cred_t *,
	caller_context_t *);
static int port_getattr(struct vnode *, struct vattr *, int, cred_t *,
	caller_context_t *);
static int port_access(struct vnode *, int, int, cred_t *, caller_context_t *);
static int port_realvp(vnode_t *, vnode_t **, caller_context_t *);
static int port_poll(vnode_t *, short, int, short *, struct pollhead **,
	caller_context_t *);
static void port_inactive(struct vnode *, cred_t *, caller_context_t *);

/*
 * Vnode-operations template for event-port vnodes.  Operations that event
 * ports do not support (frlock, pathconf, dispose, getsecattr, shrlock)
 * are routed to fs_error; everything else maps to the local port_* entry
 * points declared above.
 */
const fs_operation_def_t port_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = port_open },
	VOPNAME_CLOSE,		{ .vop_close = port_close },
	VOPNAME_GETATTR,	{ .vop_getattr = port_getattr },
	VOPNAME_ACCESS,		{ .vop_access = port_access },
	VOPNAME_INACTIVE,	{ .vop_inactive = port_inactive },
	VOPNAME_FRLOCK,		{ .error = fs_error },
	VOPNAME_REALVP,		{ .vop_realvp = port_realvp },
	VOPNAME_POLL,		{ .vop_poll = port_poll },
	VOPNAME_PATHCONF,	{ .error = fs_error },
	VOPNAME_DISPOSE,	{ .error = fs_error },
	VOPNAME_GETSECATTR,	{ .error = fs_error },
	VOPNAME_SHRLOCK,	{ .error = fs_error },
	NULL,			NULL
};

/*
 * VOP_OPEN entry point.  Ports require no per-open state, so opening
 * always succeeds without side effects.
 */
/* ARGSUSED */
static int
port_open(struct vnode **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	return (0);
}

/*
 * port_discard_events() scans the port event queue for events owned
 * by current proc. Non-shareable events will be discarded, all other
 * events remain in the event queue.
 */
void
port_discard_events(port_queue_t *portq)
{
	port_kevent_t	*kevp;
	pid_t		pid = curproc->p_pid;

	/*
	 * The call to port_block() is required to avoid interaction
	 * with other threads in port_get(n).
	 */
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	/*
	 * Flush the temporary queue back onto portq_list so the scan
	 * below sees every pending event.
	 */
	port_push_eventq(portq);	/* empty temporary queue */
	kevp = list_head(&portq->portq_list);
	while (kevp) {
		if (kevp->portkev_pid == pid) {
			/*
			 * Own event, check if it is shareable.  Non-shareable
			 * events are flagged PORT_KEV_FREE so they get
			 * discarded instead of delivered; shareable events
			 * stay queued untouched.
			 */
			if (kevp->portkev_flags & PORT_KEV_NOSHARE)
				kevp->portkev_flags |= PORT_KEV_FREE;
		}
		kevp = list_next(&portq->portq_list, kevp);
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
}

/*
 * Called from port_close().
 * Free all kernel events structures which are still in the event queue.
 */
static void
port_close_events(port_queue_t *portq)
{
	port_kevent_t	*pkevp;
	int		events;		/* ignore events */

	mutex_enter(&portq->portq_mutex);
	while (pkevp = list_head(&portq->portq_list)) {
		portq->portq_nent--;
		list_remove(&portq->portq_list, pkevp);
		if (pkevp->portkev_callback) {
			/*
			 * Give the event source a chance to do its
			 * close-time cleanup for this event.
			 */
			(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
			    &events, pkevp->portkev_pid, PORT_CALLBACK_CLOSE,
			    pkevp);
		}
		/*
		 * NOTE(review): the queue mutex is dropped around
		 * port_free_event_local() — presumably it may sleep or take
		 * other port locks; confirm before restructuring this loop.
		 */
		mutex_exit(&portq->portq_mutex);
		port_free_event_local(pkevp, 0);
		mutex_enter(&portq->portq_mutex);
	}

	/*
	 * Wait for any thread in pollwakeup(), accessing this port to
	 * finish.
	 */
	while (portq->portq_flags & PORTQ_POLLWK_PEND) {
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	}
	mutex_exit(&portq->portq_mutex);
}

/*
 * The port_close() function is called from standard close(2) when
 * the file descriptor is of type S_IFPORT/VPORT.
 * Port file descriptors behave like standard file descriptors. It means,
 * the port file/vnode is only destroyed on last close.
 * If the reference counter is > 1 then
 * - sources associated with the port will be notified about the close,
 * - objects associated with the port will be dissociated,
 * - pending and delivered events will be discarded.
 * On last close all references and caches will be removed. The vnode itself
 * will be destroyed with VOP_RELE().
 */
/* ARGSUSED */
static int
port_close(struct vnode *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	port_t		*pp;
	port_queue_t	*portq;
	port_source_t	*ps;
	port_source_t	*ps_next;
	int		source;

	pp = VTOEP(vp);
	mutex_enter(&pp->port_mutex);
	if (pp->port_flags & PORT_CLOSED) {
		/* Port is already being/been torn down; nothing to do. */
		mutex_exit(&pp->port_mutex);
		return (0);
	}
	mutex_exit(&pp->port_mutex);

	portq = &pp->port_queue;
	if (count > 1) {
		/*
		 * It is not the last close.
		 * Remove/free all event resources owned by the current proc
		 * First notify all with the port associated sources about the
		 * close(2). The last argument of the close callback function
		 * advises the source about the type of the close.
		 * If the port was set in alert mode by the current process then
		 * remove the alert mode.
		 */

		/* check alert mode of the port */
		mutex_enter(&portq->portq_mutex);
		if ((portq->portq_flags & PORTQ_ALERT) &&
		    (portq->portq_alert.portal_pid == curproc->p_pid))
			portq->portq_flags &= ~PORTQ_ALERT;
		mutex_exit(&portq->portq_mutex);

		/*
		 * Notify all event sources about port_close(); the final
		 * argument 0 indicates this is not the last close.
		 */
		mutex_enter(&portq->portq_source_mutex);
		for (source = 0; source < PORT_SCACHE_SIZE; source++) {
			ps = portq->portq_scache[PORT_SHASH(source)];
			for (; ps != NULL; ps = ps->portsrc_next) {
				if (ps->portsrc_close != NULL)
					(*ps->portsrc_close)
					    (ps->portsrc_closearg, pp->port_fd,
					    curproc->p_pid, 0);
			}
		}
		mutex_exit(&portq->portq_source_mutex);
		port_discard_events(&pp->port_queue);
		return (0);
	}

	/*
	 * We are executing the last close of the port -> discard everything
	 * Make sure that all threads/processes accessing this port leave
	 * the kernel immediately.
	 */

	mutex_enter(&portq->portq_mutex);
	portq->portq_flags |= PORTQ_CLOSE;
	/*
	 * Wake any thread blocked in port_get(n) and wait until every
	 * such thread has drained out of the port code.
	 */
	while (portq->portq_thrcnt > 0) {
		if (portq->portq_thread != NULL)
			cv_signal(&portq->portq_thread->portget_cv);
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	}
	mutex_exit(&portq->portq_mutex);

	/*
	 * Send "last close" message to associated sources.
	 * - new event allocation requests are being denied since uf_file entry
	 *   was set to NULL in closeandsetf().
	 * - all still allocated event structures must be returned to the
	 *   port immediately:
	 *	- call port_free_event(*event) or
	 *	- call port_send_event(*event) to complete event operations
	 *	  which need activities in a dedicated process environment.
	 * The port_close() function waits until all allocated event structures
	 * are delivered back to the port.
	 */

	mutex_enter(&portq->portq_source_mutex);
	for (source = 0; source < PORT_SCACHE_SIZE; source++) {
		ps = portq->portq_scache[PORT_SHASH(source)];
		for (; ps != NULL; ps = ps_next) {
			/* save next link: the source entry is freed below */
			ps_next = ps->portsrc_next;
			if (ps->portsrc_close != NULL)
				(*ps->portsrc_close)(ps->portsrc_closearg,
				    pp->port_fd, curproc->p_pid, 1);
			kmem_free(ps, sizeof (port_source_t));
		}
	}
	kmem_free(portq->portq_scache,
	    PORT_SCACHE_SIZE * sizeof (port_source_t *));
	portq->portq_scache = NULL;
	mutex_exit(&portq->portq_source_mutex);

	mutex_enter(&portq->portq_mutex);
	/*
	 * Wait for outstanding events: port_curr counts allocated events,
	 * portq_nent counts events sitting in the queue, so the loop exits
	 * once every allocated event has been delivered back to the port.
	 */
	while (pp->port_curr > portq->portq_nent)
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	mutex_exit(&portq->portq_mutex);

	/*
	 * If PORT_SOURCE_FD objects were not associated with the port then
	 * it is necessary to free the port_fdcache structure here.
	 */

	if (portq->portq_pcp != NULL) {
		mutex_destroy(&portq->portq_pcp->pc_lock);
		kmem_free(portq->portq_pcp, sizeof (port_fdcache_t));
		portq->portq_pcp = NULL;
	}

	/*
	 * Now all events are passed back to the port,
	 * discard remaining events in the port queue
	 */

	port_close_events(portq);
	return (0);
}

/*
 * The port_poll() function is the VOP_POLL() entry of event ports.
 * Event ports return:
 * POLLIN  : events are available in the event queue
 * POLLOUT : event queue can still accept events
 */
/*ARGSUSED*/
static int
port_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
	port_t		*pp;
	port_queue_t	*portq;
	short		levents;

	pp = VTOEP(vp);
	portq = &pp->port_queue;
	levents = 0;
	mutex_enter(&portq->portq_mutex);
	/* POLLIN when at least one event is queued */
	if (portq->portq_nent)
		levents = POLLIN;
	/* POLLOUT while the port has room for more events */
	if (pp->port_curr < pp->port_max_events)
		levents |= POLLOUT;
	levents &= events;
	*reventsp = levents;
	if (levents == 0) {
		if (!anyyet) {
			/*
			 * No requested condition is satisfied and no other
			 * fd in the poll set is ready: hand back our
			 * pollhead and record which conditions (POLLIN/
			 * POLLOUT) are being waited for, presumably so the
			 * port code can issue pollwakeup() selectively —
			 * confirm against the PORTQ_POLLIN/POLLOUT users.
			 */
			*phpp = &pp->port_pollhd;
			portq->portq_flags |=
			    events & POLLIN ? PORTQ_POLLIN : 0;
			portq->portq_flags |=
			    events & POLLOUT ? PORTQ_POLLOUT : 0;
		}
	}
	mutex_exit(&portq->portq_mutex);
	return (0);
}


/*
 * VOP_GETATTR entry point.  Ports are synthetic files, so most
 * attributes are zero/fixed; size reflects the number of queued events
 * and all timestamps report the port's creation time.
 */
/* ARGSUSED */
static int
port_getattr(struct vnode *vp, struct vattr *vap, int flags, cred_t *cr,
	caller_context_t *ct)
{
	port_t	*pp;
	extern dev_t portdev;

	pp = VTOEP(vp);

	vap->va_mask = 0;		/* bit-mask of attributes */
	vap->va_type = vp->v_type;	/* vnode type (for create) */
	vap->va_mode = 0;		/* file access mode */
	vap->va_uid = pp->port_uid;	/* owner user id */
	vap->va_gid = pp->port_gid;	/* owner group id */
	vap->va_fsid = portdev;		/* file system id */
	vap->va_nodeid = (ino64_t)0;	/* node id */
	vap->va_nlink = vp->v_count;	/* number of references to file */
	vap->va_size = (u_offset_t)pp->port_queue.portq_nent; /* file size */
	vap->va_atime = pp->port_ctime;	/* time of last access */
	vap->va_mtime = pp->port_ctime;	/* time of last modification */
	vap->va_ctime = pp->port_ctime;	/* time file ``created'' */
	vap->va_rdev = portdev;		/* device the file represents */
	vap->va_blksize = 0;		/* fundamental block size */
	vap->va_nblocks = (fsblkcnt64_t)0; /* # of blocks allocated */
	vap->va_seq = 0;		/* sequence number */

	return (0);
}

/*
 * Destroy the port.
 */
/* ARGSUSED */
static void
port_inactive(struct vnode *vp, cred_t *cr, caller_context_t *ct)
{
	port_t	*pp = VTOEP(vp);
	extern port_kstat_t port_kstat;

	/*
	 * Update global port accounting (total port count, per-process
	 * count, kstat) under the port_control lock.
	 */
	mutex_enter(&port_control.pc_mutex);
	port_control.pc_nents--;
	curproc->p_portcnt--;
	port_kstat.pks_ports.value.ui32--;
	mutex_exit(&port_control.pc_mutex);
	/*
	 * NOTE(review): the vnode is freed before the port_t — after
	 * vn_free() nothing should reach pp through the vnode; confirm no
	 * remaining references exist at this point.
	 */
	vn_free(vp);
	mutex_destroy(&pp->port_mutex);
	mutex_destroy(&pp->port_queue.portq_mutex);
	mutex_destroy(&pp->port_queue.portq_source_mutex);
	kmem_free(pp, sizeof (port_t));
}

/*
 * VOP_ACCESS entry point.  No access checks are performed on ports;
 * possession of the file descriptor implies access.
 */
/* ARGSUSED */
static int
port_access(struct vnode *vp, int mode, int flags, cred_t *cr,
	caller_context_t *ct)
{
	return (0);
}

/*
 * VOP_REALVP entry point.  A port vnode is its own real vnode; there is
 * no underlying vnode to expose.
 */
/* ARGSUSED */
static int
port_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
{
	*vpp = vp;
	return (0);
}