/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/cred.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/gfs.h>
#include <sys/vnode.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <fs/fs_subr.h>
#include <sys/contract.h>
#include <sys/contract_impl.h>
#include <sys/ctfs.h>
#include <sys/ctfs_impl.h>
#include <sys/file.h>
#include <sys/policy.h>

/*
 * CTFS routines for the /system/contract/<type>/bundle vnode.
 * CTFS routines for the /system/contract/<type>/pbundle vnode.
 * CTFS routines for the /system/contract/<type>/<ctid>/events vnode.
 */
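
/*
 * Illustrative sketch (not part of this file; for orientation only) of
 * how a userland consumer typically drains one of these endpoints via
 * libcontract(3LIB).  ct_event_read() ultimately issues the CT_ERECV
 * ioctl handled below:
 *
 *	int fd = open64("/system/contract/process/pbundle", O_RDONLY);
 *	ct_evthdl_t ev;
 *
 *	while (ct_event_read(fd, &ev) == 0) {
 *		(void) ct_event_get_type(ev);	(inspect the event)
 *		ct_event_free(ev);
 *	}
 */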

/*
 * ctfs_endpoint_open
 *
 * Called by the VOP_OPEN entry points to perform some common checks
 * and set up the endpoint listener, if not already done.
 */
static int
ctfs_endpoint_open(ctfs_endpoint_t *endpt, ct_equeue_t *q, int flag)
{
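	/*
	 * Endpoints may only be opened read-only with the large-file
	 * flag (FOFFMAX) set; FNONBLOCK is the only other flag we
	 * accept, and it selects non-blocking event reception.
	 */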
	if ((flag & ~FNONBLOCK) != (FREAD | FOFFMAX))
		return (EINVAL);

	mutex_enter(&endpt->ctfs_endpt_lock);
	if ((endpt->ctfs_endpt_flags & CTFS_ENDPT_SETUP) == 0) {
		endpt->ctfs_endpt_flags |= CTFS_ENDPT_SETUP;
		if (flag & FNONBLOCK)
			endpt->ctfs_endpt_flags |= CTFS_ENDPT_NBLOCK;
		cte_add_listener(q, &endpt->ctfs_endpt_listener);
	}
	mutex_exit(&endpt->ctfs_endpt_lock);

	return (0);
}

/*
 * ctfs_endpoint_inactive
 *
 * Called by the VOP_INACTIVE entry points to perform common listener
 * cleanup.
 */
static void
ctfs_endpoint_inactive(ctfs_endpoint_t *endpt)
{
	mutex_enter(&endpt->ctfs_endpt_lock);
	if (endpt->ctfs_endpt_flags & CTFS_ENDPT_SETUP) {
		endpt->ctfs_endpt_flags = 0;
		cte_remove_listener(&endpt->ctfs_endpt_listener);
	}
	pollhead_clean(&endpt->ctfs_endpt_listener.ctl_pollhead);
	mutex_exit(&endpt->ctfs_endpt_lock);
}

/*
 * ctfs_endpoint_ioctl
 *
 * Implements the common VOP_IOCTL handling for the event endpoints.
 * rprivchk, if true, indicates that event receive requests should
 * check the provided credentials.  This distinction exists because
 * contract endpoints perform their privilege checks at open-time, and
 * process bundle queue listeners by definition may view all events
 * their queues contain.
 */
static int
ctfs_endpoint_ioctl(ctfs_endpoint_t *endpt, int cmd, intptr_t arg, cred_t *cr,
    zone_t *zone, int rprivchk)
{
	uint64_t id, zuniqid;

	zuniqid = zone->zone_uniqid;

	switch (cmd) {
	case CT_ERESET:
		cte_reset_listener(&endpt->ctfs_endpt_listener);
		break;
	case CT_ERECV:
		/*
		 * We pass in NULL for the cred when reading from
		 * process bundle queues and contract queues because
		 * the privilege check was performed at open time.
		 */
		return (cte_get_event(&endpt->ctfs_endpt_listener,
		    endpt->ctfs_endpt_flags & CTFS_ENDPT_NBLOCK,
		    (void *)arg, rprivchk ? cr : NULL, zuniqid, 0));
	case CT_ECRECV:
		return (cte_get_event(&endpt->ctfs_endpt_listener,
		    endpt->ctfs_endpt_flags & CTFS_ENDPT_NBLOCK,
		    (void *)arg, rprivchk ? cr : NULL, zuniqid, 1));
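	/*
	 * CT_ENEXT copies in a 64-bit event ID and asks the listener to
	 * move past that event; see cte_next_event().
	 */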
	case CT_ENEXT:
		if (copyin((void *)arg, &id, sizeof (uint64_t)))
			return (EFAULT);
		return (cte_next_event(&endpt->ctfs_endpt_listener, id));
	case CT_ERELIABLE:
		return (cte_set_reliable(&endpt->ctfs_endpt_listener, cr));
	default:
		return (EINVAL);
	}

	return (0);
}

/*
 * ctfs_endpoint_poll
 *
 * Called by the VOP_POLL entry points.
 */
static int
ctfs_endpoint_poll(ctfs_endpoint_t *endpt, short events, int anyyet,
    short *reventsp, pollhead_t **php)
{
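	/*
	 * A non-NULL listener position means at least one event is
	 * available, so POLLIN can be asserted immediately; otherwise
	 * hand back the listener's pollhead so the caller can wait.
	 */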
	if ((events & POLLIN) && endpt->ctfs_endpt_listener.ctl_position) {
		*reventsp = POLLIN;
	} else {
		*reventsp = 0;
		if (!anyyet)
			*php = &endpt->ctfs_endpt_listener.ctl_pollhead;
	}

	return (0);
}

/*
 * ctfs_create_evnode
 *
 * Creates and returns a new evnode.
 */
vnode_t *
ctfs_create_evnode(vnode_t *pvp)
{
	vnode_t *vp;
	ctfs_evnode_t *evnode;
	ctfs_cdirnode_t *cdirnode = pvp->v_data;

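	/*
	 * gfs_file_create() allocates the vnode with a zero-filled
	 * ctfs_evnode_t hanging off v_data and takes a hold on the
	 * parent directory on our behalf.
	 */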
	vp = gfs_file_create(sizeof (ctfs_evnode_t), pvp, ctfs_ops_event);
	evnode = vp->v_data;

	/*
	 * We transitively have a hold on the contract through our
	 * parent directory.
	 */
	evnode->ctfs_ev_contract = cdirnode->ctfs_cn_contract;

	return (vp);
}

/*
 * ctfs_ev_access - VOP_ACCESS entry point
 *
 * You may only access event files for contracts owned by you or by
 * your effective user id, unless you hold the appropriate privilege.
 */
/*ARGSUSED*/
static int
ctfs_ev_access(
	vnode_t *vp,
	int mode,
	int flags,
	cred_t *cr,
	caller_context_t *cct)
{
	ctfs_evnode_t *evnode = vp->v_data;
	contract_t *ct = evnode->ctfs_ev_contract;
	int error;

	if (mode & (VWRITE | VEXEC))
		return (EACCES);

	if (error = secpolicy_contract_observer(cr, ct))
		return (error);

	return (0);
}

/*
 * ctfs_ev_open - VOP_OPEN entry point
 *
 * Performs the same privilege checks as ctfs_ev_access, and then calls
 * ctfs_endpoint_open to perform the common endpoint initialization.
 */
/* ARGSUSED */
static int
ctfs_ev_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *cct)
{
	ctfs_evnode_t *evnode = (*vpp)->v_data;
	contract_t *ct = evnode->ctfs_ev_contract;
	int error;

	if (error = secpolicy_contract_observer(cr, ct))
		return (error);

	/*
	 * See comment in ctfs_bu_open.
	 */
	return (ctfs_endpoint_open(&evnode->ctfs_ev_listener,
	    &evnode->ctfs_ev_contract->ct_events, flag));
}

/*
 * ctfs_ev_inactive - VOP_INACTIVE entry point
 */
/* ARGSUSED */
static void
ctfs_ev_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	ctfs_evnode_t *evnode;
	vnode_t *pvp = gfs_file_parent(vp);

	/*
	 * We must destroy the endpoint before releasing the parent; otherwise
	 * we will try to destroy a contract with active listeners.  To prevent
	 * this, we grab an extra hold on the parent.
	 */
	VN_HOLD(pvp);
	if ((evnode = gfs_file_inactive(vp)) != NULL) {
		ctfs_endpoint_inactive(&evnode->ctfs_ev_listener);
		kmem_free(evnode, sizeof (ctfs_evnode_t));
	}
	VN_RELE(pvp);
}

/*
 * ctfs_ev_getattr - VOP_GETATTR entry point
 */
/* ARGSUSED */
static int
ctfs_ev_getattr(
	vnode_t *vp,
	vattr_t *vap,
	int flags,
	cred_t *cr,
	caller_context_t *ct)
{
	ctfs_evnode_t *evnode = vp->v_data;

	vap->va_type = VREG;
	vap->va_mode = 0444;
	vap->va_nlink = 1;
	vap->va_size = 0;
	vap->va_ctime = evnode->ctfs_ev_contract->ct_ctime;
	mutex_enter(&evnode->ctfs_ev_contract->ct_events.ctq_lock);
	vap->va_atime = vap->va_mtime =
	    evnode->ctfs_ev_contract->ct_events.ctq_atime;
	mutex_exit(&evnode->ctfs_ev_contract->ct_events.ctq_lock);
	ctfs_common_getattr(vp, vap);

	return (0);
}

/*
 * ctfs_ev_ioctl - VOP_IOCTL entry point
 */
/* ARGSUSED */
static int
ctfs_ev_ioctl(
	vnode_t *vp,
	int cmd,
	intptr_t arg,
	int flag,
	cred_t *cr,
	int *rvalp,
	caller_context_t *ct)
{
	ctfs_evnode_t *evnode = vp->v_data;

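	/*
	 * We pass 0 for rprivchk: the observer privilege check was
	 * already performed in ctfs_ev_open(), so event receives on
	 * this endpoint need not repeat it.
	 */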
	return (ctfs_endpoint_ioctl(&evnode->ctfs_ev_listener, cmd, arg, cr,
	    VTOZONE(vp), 0));
}

/*
 * ctfs_ev_poll - VOP_POLL entry point
 */
/*ARGSUSED*/
static int
ctfs_ev_poll(
	vnode_t *vp,
	short events,
	int anyyet,
	short *reventsp,
	pollhead_t **php,
	caller_context_t *ct)
{
	ctfs_evnode_t *evnode = vp->v_data;

	return (ctfs_endpoint_poll(&evnode->ctfs_ev_listener, events, anyyet,
	    reventsp, php));
}

const fs_operation_def_t ctfs_tops_event[] = {
	{ VOPNAME_OPEN,		{ .vop_open = ctfs_ev_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = ctfs_close } },
	{ VOPNAME_IOCTL,	{ .vop_ioctl = ctfs_ev_ioctl } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = ctfs_ev_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = ctfs_ev_access } },
	{ VOPNAME_READDIR,	{ .error = fs_notdir } },
	{ VOPNAME_LOOKUP,	{ .error = fs_notdir } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = ctfs_ev_inactive } },
	{ VOPNAME_POLL,		{ .vop_poll = ctfs_ev_poll } },
	{ NULL, NULL }
};

/*
 * ctfs_create_pbundle
 *
 * Creates and returns a bunode for a /system/contract/<type>/pbundle
 * file.
 */
vnode_t *
ctfs_create_pbundle(vnode_t *pvp)
{
	vnode_t *vp;
	ctfs_bunode_t *bundle;

	vp = gfs_file_create(sizeof (ctfs_bunode_t), pvp, ctfs_ops_bundle);
	bundle = vp->v_data;
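	/*
	 * Point the bunode at the calling process's bundle queue for
	 * this contract type; contract_type_pbundle() creates that
	 * queue on first use.
	 */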
	bundle->ctfs_bu_queue =
	    contract_type_pbundle(ct_types[gfs_file_index(pvp)], curproc);

	return (vp);
}

/*
 * ctfs_create_bundle
 *
 * Creates and returns a bunode for a /system/contract/<type>/bundle
 * file.
 */
vnode_t *
ctfs_create_bundle(vnode_t *pvp)
{
	vnode_t *vp;
	ctfs_bunode_t *bundle;

	vp = gfs_file_create(sizeof (ctfs_bunode_t), pvp, ctfs_ops_bundle);
	bundle = vp->v_data;
	bundle->ctfs_bu_queue =
	    contract_type_bundle(ct_types[gfs_file_index(pvp)]);

	return (vp);
}

/*
 * ctfs_bu_open - VOP_OPEN entry point
 */
/* ARGSUSED */
static int
ctfs_bu_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	ctfs_bunode_t *bunode = (*vpp)->v_data;

	/*
	 * This assumes we are only ever called immediately after a
	 * VOP_LOOKUP.  We could clone ourselves here, but doing so
	 * would make /proc/pid/fd accesses less useful.
	 */
	return (ctfs_endpoint_open(&bunode->ctfs_bu_listener,
	    bunode->ctfs_bu_queue, flag));
}

/*
 * ctfs_bu_inactive - VOP_INACTIVE entry point
 */
/* ARGSUSED */
static void
ctfs_bu_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	ctfs_bunode_t *bunode;
	vnode_t *pvp = gfs_file_parent(vp);

	/*
	 * See comments in ctfs_ev_inactive() above.
	 */
	VN_HOLD(pvp);
	if ((bunode = gfs_file_inactive(vp)) != NULL) {
		ctfs_endpoint_inactive(&bunode->ctfs_bu_listener);
		kmem_free(bunode, sizeof (ctfs_bunode_t));
	}
	VN_RELE(pvp);
}

/*
 * ctfs_bu_getattr - VOP_GETATTR entry point
 */
/* ARGSUSED */
static int
ctfs_bu_getattr(
	vnode_t *vp,
	vattr_t *vap,
	int flags,
	cred_t *cr,
	caller_context_t *ct)
{
	ctfs_bunode_t *bunode = vp->v_data;

	vap->va_type = VREG;
	vap->va_mode = 0444;
	vap->va_nodeid = gfs_file_index(vp);
	vap->va_nlink = 1;
	vap->va_size = 0;
	vap->va_ctime.tv_sec = vp->v_vfsp->vfs_mtime;
	vap->va_ctime.tv_nsec = 0;
	mutex_enter(&bunode->ctfs_bu_queue->ctq_lock);
	vap->va_mtime = vap->va_atime = bunode->ctfs_bu_queue->ctq_atime;
	mutex_exit(&bunode->ctfs_bu_queue->ctq_lock);
	ctfs_common_getattr(vp, vap);

	return (0);
}

/*
 * ctfs_bu_ioctl - VOP_IOCTL entry point
 */
/* ARGSUSED */
static int
ctfs_bu_ioctl(
	vnode_t *vp,
	int cmd,
	intptr_t arg,
	int flag,
	cred_t *cr,
	int *rvalp,
	caller_context_t *ct)
{
	ctfs_bunode_t *bunode = vp->v_data;

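	/*
	 * Receive-time credential checks (rprivchk) are only needed for
	 * type-wide bundle queues; pbundle listeners, by definition, may
	 * view every event their queue contains.
	 */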
	return (ctfs_endpoint_ioctl(&bunode->ctfs_bu_listener, cmd, arg, cr,
	    VTOZONE(vp), bunode->ctfs_bu_queue->ctq_listno == CTEL_BUNDLE));
}

/*
 * ctfs_bu_poll - VOP_POLL entry point
 */
/*ARGSUSED*/
static int
ctfs_bu_poll(
	vnode_t *vp,
	short events,
	int anyyet,
	short *reventsp,
	pollhead_t **php,
	caller_context_t *ct)
{
	ctfs_bunode_t *bunode = vp->v_data;

	return (ctfs_endpoint_poll(&bunode->ctfs_bu_listener, events, anyyet,
	    reventsp, php));
}

const fs_operation_def_t ctfs_tops_bundle[] = {
	{ VOPNAME_OPEN,		{ .vop_open = ctfs_bu_open } },
	{ VOPNAME_CLOSE,	{ .vop_close = ctfs_close } },
	{ VOPNAME_IOCTL,	{ .vop_ioctl = ctfs_bu_ioctl } },
	{ VOPNAME_GETATTR,	{ .vop_getattr = ctfs_bu_getattr } },
	{ VOPNAME_ACCESS,	{ .vop_access = ctfs_access_readonly } },
	{ VOPNAME_READDIR,	{ .error = fs_notdir } },
	{ VOPNAME_LOOKUP,	{ .error = fs_notdir } },
	{ VOPNAME_INACTIVE,	{ .vop_inactive = ctfs_bu_inactive } },
	{ VOPNAME_POLL,		{ .vop_poll = ctfs_bu_poll } },
	{ NULL, NULL }
};
499