/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <asm/atomic.h>

#include <linux/cn_proc.h>

#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))

static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };

static inline void get_seq(__u32 *ts, int *cpu)
{
	*ts = get_cpu_var(proc_event_counts)++;
	*cpu = smp_processor_id();
	put_cpu_var(proc_event_counts);
}

void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ev->timestamp); /* get high res monotonic timestamp */
	ev->what = PROC_EVENT_FORK;
	ev->event_data.fork.parent_pid = task->real_parent->pid;
	ev->event_data.fork.parent_tgid = task->real_parent->tgid;
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	/* If cn_netlink_send() fails, the event is silently dropped */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exec_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ev->timestamp);
	ev->what = PROC_EVENT_EXEC;
	ev->event_data.exec.process_pid = task->pid;
	ev->event_data.exec.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = task->uid;
		ev->event_data.id.e.euid = task->euid;
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = task->gid;
		ev->event_data.id.e.egid = task->egid;
	} else
		return;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ev->timestamp);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ev->timestamp);
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: unlike the conventional in-kernel error values (-EFOO), the
 * error is reported as a positive value because it is not being
 * returned via the syscall return path.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	msg->seq = rcvd_seq;
	ktime_get_ts(&ev->timestamp);
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/**
 * cn_proc_mcast_ctl - handle multicast listen/ignore requests
 * @data: message sent from userspace via the connector
 */
static void cn_proc_mcast_ctl(void *data)
{
	struct cn_msg *msg = data;
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	if (msg->len != sizeof(*mc_op))
		return;

	mc_op = (enum proc_cn_mcast_op *)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}
	cn_proc_ack(err, msg->seq, msg->ack);
}

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
	int err;

	err = cn_add_callback(&cn_proc_event_id, "cn_proc",
			      &cn_proc_mcast_ctl);
	if (err) {
		printk(KERN_WARNING "cn_proc failed to register\n");
		return err;
	}
	return 0;
}

module_init(cn_proc_init);
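
/*
 * Illustrative userspace sketch (not part of this module): the
 * PROC_CN_MCAST_LISTEN/PROC_CN_MCAST_IGNORE requests handled by
 * cn_proc_mcast_ctl() above are sent over a NETLINK_CONNECTOR socket.
 * A minimal subscriber might look roughly like the following; error
 * handling and the recv() loop for the proc_event messages are
 * omitted, and the variable names are arbitrary.
 *
 *	int sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = CN_IDX_PROC,
 *		.nl_pid    = getpid(),
 *	};
 *	bind(sk, (struct sockaddr *)&sa, sizeof(sa));
 *
 *	char buf[NLMSG_SPACE(sizeof(struct cn_msg) +
 *			     sizeof(enum proc_cn_mcast_op))] = { 0 };
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *	struct cn_msg *cn = NLMSG_DATA(nlh);
 *	enum proc_cn_mcast_op op = PROC_CN_MCAST_LISTEN;
 *
 *	nlh->nlmsg_len  = NLMSG_LENGTH(sizeof(*cn) + sizeof(op));
 *	nlh->nlmsg_type = NLMSG_DONE;
 *	nlh->nlmsg_pid  = getpid();
 *	cn->id.idx = CN_IDX_PROC;
 *	cn->id.val = CN_VAL_PROC;
 *	cn->len    = sizeof(op);
 *	memcpy(cn->data, &op, sizeof(op));
 *	send(sk, nlh, nlh->nlmsg_len, 0);
 */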