/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Vnode operations for event port file descriptors (vnodes of type
 * S_IFPORT/VPORT).  Ports have no backing store; most VOP entry points
 * are either trivial or map to fs_error().
 */

#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
#include <sys/kmem.h>
#include <fs/fs_subr.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/port_impl.h>

/* local functions */
static int port_open(struct vnode **, int, cred_t *, caller_context_t *);
static int port_close(struct vnode *, int, int, offset_t, cred_t *,
	caller_context_t *);
static int port_getattr(struct vnode *, struct vattr *, int, cred_t *,
	caller_context_t *);
static int port_access(struct vnode *, int, int, cred_t *, caller_context_t *);
static int port_realvp(vnode_t *, vnode_t **, caller_context_t *);
static int port_poll(vnode_t *, short, int, short *, struct pollhead **,
	caller_context_t *);
static void port_inactive(struct vnode *, cred_t *, caller_context_t *);

/*
 * Operations vector installed on port vnodes.  Entries not meaningful
 * for ports (locking, pathconf, security attributes, ...) return
 * fs_error().
 */
const fs_operation_def_t port_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = port_open },
	VOPNAME_CLOSE,		{ .vop_close = port_close },
	VOPNAME_GETATTR,	{ .vop_getattr = port_getattr },
	VOPNAME_ACCESS,		{ .vop_access = port_access },
	VOPNAME_INACTIVE,	{ .vop_inactive = port_inactive },
	VOPNAME_FRLOCK,		{ .error = fs_error },
	VOPNAME_REALVP,		{ .vop_realvp = port_realvp },
	VOPNAME_POLL,		{ .vop_poll = port_poll },
	VOPNAME_PATHCONF,	{ .error = fs_error },
	VOPNAME_DISPOSE,	{ .error = fs_error },
	VOPNAME_GETSECATTR,	{ .error = fs_error },
	VOPNAME_SHRLOCK,	{ .error = fs_error },
	NULL,			NULL
};

/*
 * VOP_OPEN() for ports: no per-open state is required, always succeeds.
 */
/* ARGSUSED */
static int
port_open(struct vnode **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	return (0);
}

/*
 * port_discard_events() scans the port event queue for events owned
 * by the current proc.  Non-shareable events will be discarded, all other
 * events remain in the event queue.
 *
 * Events are not freed here; own non-shareable events are only tagged
 * with PORT_KEV_FREE.
 * NOTE(review): presumably PORT_KEV_FREE causes the event to be freed
 * when it is later retrieved/processed — confirm in the port framework.
 */
void
port_discard_events(port_queue_t *portq)
{
	port_kevent_t	*kevp;
	pid_t		pid = curproc->p_pid;

	/*
	 * The call to port_block() is required to avoid interaction
	 * with other threads in port_get(n).
	 */
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	port_push_eventq(portq);	/* empty temporary queue */

	/* Walk the queue, tagging this process' non-shareable events. */
	kevp = list_head(&portq->portq_list);
	while (kevp) {
		if (kevp->portkev_pid == pid) {
			/* own event, check if it is shareable */
			if (kevp->portkev_flags & PORT_KEV_NOSHARE)
				kevp->portkev_flags |= PORT_KEV_FREE;
		}
		kevp = list_next(&portq->portq_list, kevp);
	}
	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
}

/*
 * Called from port_close().
 * Free all kernel event structures which are still in the event queue.
 */
static void
port_close_events(port_queue_t *portq)
{
	port_kevent_t	*pkevp;
	int		events;		/* ignore events */

	mutex_enter(&portq->portq_mutex);

	/* Drain every remaining event from the queue. */
	while (pkevp = list_head(&portq->portq_list)) {
		portq->portq_nent--;
		list_remove(&portq->portq_list, pkevp);
		if (pkevp->portkev_callback) {
			/*
			 * Let the event source know the port is closing;
			 * the returned events are intentionally ignored.
			 */
			(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
			    &events, pkevp->portkev_pid, PORT_CALLBACK_CLOSE,
			    pkevp);
		}
		/*
		 * Drop the queue lock around the free; the event was
		 * already unlinked above, so no other thread can reach it.
		 */
		mutex_exit(&portq->portq_mutex);
		port_free_event_local(pkevp, 0);
		mutex_enter(&portq->portq_mutex);
	}

	/*
	 * Wait for any thread in pollwakeup(), accessing this port to
	 * finish.
	 */
	while (portq->portq_flags & PORTQ_POLLWK_PEND) {
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	}
	mutex_exit(&portq->portq_mutex);
}

/*
 * The port_close() function is called from standard close(2) when
 * the file descriptor is of type S_IFPORT/VPORT.
 * Port file descriptors behave like standard file descriptors. It means,
 * the port file/vnode is only destroyed on last close.
 * If the reference counter is > 1 then
 * - sources associated with the port will be notified about the close,
 * - objects associated with the port will be dissociated,
 * - pending and delivered events will be discarded.
 * On last close all references and caches will be removed. The vnode itself
 * will be destroyed with VOP_RELE().
 */
/* ARGSUSED */
static int
port_close(struct vnode *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	port_t		*pp;
	port_queue_t	*portq;
	port_source_t	*ps;
	port_source_t	*ps_next;
	int		source;

	pp = VTOEP(vp);
	mutex_enter(&pp->port_mutex);
	if (pp->port_flags & PORT_CLOSED) {
		/* Port teardown already done by an earlier close. */
		mutex_exit(&pp->port_mutex);
		return (0);
	}
	mutex_exit(&pp->port_mutex);

	portq = &pp->port_queue;
	if (count > 1) {
		/*
		 * It is not the last close.
		 * Remove/free all event resources owned by the current proc
		 * First notify all with the port associated sources about the
		 * close(2). The last argument of the close callback function
		 * advises the source about the type of the close.
		 * If the port was set in alert mode by the current process
		 * then remove the alert mode.
		 */

		/* check alert mode of the port */
		mutex_enter(&portq->portq_mutex);
		if ((portq->portq_flags & PORTQ_ALERT) &&
		    (portq->portq_alert.portal_pid == curproc->p_pid))
			portq->portq_flags &= ~PORTQ_ALERT;
		mutex_exit(&portq->portq_mutex);

		/* notify all event sources about port_close() */
		mutex_enter(&portq->portq_source_mutex);
		for (source = 0; source < PORT_SCACHE_SIZE; source++) {
			ps = portq->portq_scache[PORT_SHASH(source)];
			for (; ps != NULL; ps = ps->portsrc_next) {
				/* last arg 0: not the final close */
				if (ps->portsrc_close != NULL)
					(*ps->portsrc_close)
					    (ps->portsrc_closearg, pp->port_fd,
					    curproc->p_pid, 0);
			}
		}
		mutex_exit(&portq->portq_source_mutex);
		port_discard_events(&pp->port_queue);
		return (0);
	}

	/*
	 * We are executing the last close of the port -> discard everything
	 * Make sure that all threads/processes accessing this port leave
	 * the kernel immediately.
	 */

	mutex_enter(&portq->portq_mutex);
	portq->portq_flags |= PORTQ_CLOSE;
	/* Wake waiters in port_get(n) and wait until they have all left. */
	while (portq->portq_thrcnt > 0) {
		if (portq->portq_thread != NULL)
			cv_signal(&portq->portq_thread->portget_cv);
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	}
	mutex_exit(&portq->portq_mutex);

	/*
	 * Send "last close" message to associated sources.
	 * - new event allocation requests are being denied since uf_file entry
	 *   was set to NULL in closeandsetf().
	 * - all still allocated event structures must be returned to the
	 *   port immediately:
	 *   - call port_free_event(*event) or
	 *   - call port_send_event(*event) to complete event operations
	 *     which need activities in a dedicated process environment.
	 * The port_close() function waits until all allocated event structures
	 * are delivered back to the port.
	 */

	mutex_enter(&portq->portq_source_mutex);
	for (source = 0; source < PORT_SCACHE_SIZE; source++) {
		ps = portq->portq_scache[PORT_SHASH(source)];
		for (; ps != NULL; ps = ps_next) {
			/* save next pointer; ps is freed below */
			ps_next = ps->portsrc_next;
			/* last arg 1: this is the final close */
			if (ps->portsrc_close != NULL)
				(*ps->portsrc_close)(ps->portsrc_closearg,
				    pp->port_fd, curproc->p_pid, 1);
			kmem_free(ps, sizeof (port_source_t));
		}
	}
	/* destroy the source cache itself */
	kmem_free(portq->portq_scache,
	    PORT_SCACHE_SIZE * sizeof (port_source_t *));
	portq->portq_scache = NULL;
	mutex_exit(&portq->portq_source_mutex);

	mutex_enter(&portq->portq_mutex);
	/* Wait for outstanding events */
	while (pp->port_curr > portq->portq_nent)
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	mutex_exit(&portq->portq_mutex);

	/*
	 * If PORT_SOURCE_FD objects were not associated with the port then
	 * it is necessary to free the port_fdcache structure here.
	 */

	if (portq->portq_pcp != NULL) {
		mutex_destroy(&portq->portq_pcp->pc_lock);
		kmem_free(portq->portq_pcp, sizeof (port_fdcache_t));
		portq->portq_pcp = NULL;
	}

	/*
	 * Now all events are passed back to the port,
	 * discard remaining events in the port queue
	 */

	port_close_events(portq);
	return (0);
}

/*
 * The port_poll() function is the VOP_POLL() entry of event ports.
 * Event ports return:
 * POLLIN  : events are available in the event queue
 * POLLOUT : event queue can still accept events
 */
/*ARGSUSED*/
static int
port_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
	port_t		*pp;
	port_queue_t	*portq;
	short		levents;

	pp = VTOEP(vp);
	portq = &pp->port_queue;
	levents = 0;
	mutex_enter(&portq->portq_mutex);
	if (portq->portq_nent)
		levents = POLLIN;	/* queued events available */
	if (pp->port_curr < pp->port_max_events)
		levents |= POLLOUT;	/* room for more events */
	/* report only the conditions the caller asked for */
	levents &= events;
	*reventsp = levents;
	if (levents == 0) {
		/*
		 * Nothing satisfied; hand out the pollhead (unless another
		 * fd already did) and record which conditions to wake on.
		 */
		if (!anyyet) {
			*phpp = &pp->port_pollhd;
			portq->portq_flags |=
			    events & POLLIN ? PORTQ_POLLIN : 0;
			portq->portq_flags |=
			    events & POLLOUT ? PORTQ_POLLOUT : 0;
		}
	}
	mutex_exit(&portq->portq_mutex);
	return (0);
}


/*
 * VOP_GETATTR() for ports: synthesize attributes; the event count is
 * reported as the file size and port creation time as all timestamps.
 */
/* ARGSUSED */
static int
port_getattr(struct vnode *vp, struct vattr *vap, int flags, cred_t *cr,
	caller_context_t *ct)
{
	port_t		*pp;
	extern dev_t	portdev;

	pp = VTOEP(vp);

	vap->va_type = vp->v_type;	/* vnode type (for create) */
	vap->va_mode = 0;		/* file access mode */
	vap->va_uid = pp->port_uid;	/* owner user id */
	vap->va_gid = pp->port_gid;	/* owner group id */
	vap->va_fsid = portdev;		/* file system id */
	vap->va_nodeid = (ino64_t)0;	/* node id */
	vap->va_nlink = vp->v_count;	/* number of references to file */
	vap->va_size = (u_offset_t)pp->port_queue.portq_nent; /* file size */
	vap->va_atime = pp->port_ctime;	/* time of last access */
	vap->va_mtime = pp->port_ctime;	/* time of last modification */
	vap->va_ctime = pp->port_ctime;	/* time file ``created'' */
	vap->va_rdev = portdev;		/* device the file represents */
	vap->va_blksize = 0;		/* fundamental block size */
	vap->va_nblocks = (fsblkcnt64_t)0; /* # of blocks allocated */
	vap->va_seq = 0;		/* sequence number */

	return (0);
}

/*
 * Destroy the port.
 */
/* ARGSUSED */
static void
port_inactive(struct vnode *vp, cred_t *cr, caller_context_t *ct)
{
	port_t	*pp = VTOEP(vp);
	extern port_kstat_t port_kstat;

	/* update global port accounting and kstat under the global lock */
	mutex_enter(&port_control.pc_mutex);
	port_control.pc_nents--;
	curproc->p_portcnt--;
	port_kstat.pks_ports.value.ui32--;
	mutex_exit(&port_control.pc_mutex);
	/*
	 * Release the vnode, then tear down the port structure.
	 * NOTE(review): pp is still used after vn_free(vp) — this assumes
	 * the port_t is allocated separately from the vnode; confirm in
	 * the port allocation path.
	 */
	vn_free(vp);
	mutex_destroy(&pp->port_mutex);
	mutex_destroy(&pp->port_queue.portq_mutex);
	mutex_destroy(&pp->port_queue.portq_source_mutex);
	kmem_free(pp, sizeof (port_t));
}

/*
 * VOP_ACCESS() for ports: all access checks succeed.
 */
/* ARGSUSED */
static int
port_access(struct vnode *vp, int mode, int flags, cred_t *cr,
	caller_context_t *ct)
{
	return (0);
}

/*
 * VOP_REALVP() for ports: a port vnode is its own real vnode.
 */
/* ARGSUSED */
static int
port_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
{
	*vpp = vp;
	return (0);
}