/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <asm/atomic.h>

#include <linux/cn_proc.h>

#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))

static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };

/*
 * Grab the next per-cpu sequence number and record which cpu it came from.
 * get_cpu_var()/put_cpu_var() keep preemption disabled while the counter
 * is incremented, so the (seq, cpu) pair stays consistent.
 */
static inline void get_seq(__u32 *ts, int *cpu)
{
        *ts = get_cpu_var(proc_event_counts)++;
        *cpu = smp_processor_id();
        put_cpu_var(proc_event_counts);
}

void proc_fork_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ev->timestamp); /* get high res monotonic timestamp */
        ev->what = PROC_EVENT_FORK;
        ev->event_data.fork.parent_pid = task->real_parent->pid;
        ev->event_data.fork.parent_tgid = task->real_parent->tgid;
        ev->event_data.fork.child_pid = task->pid;
        ev->event_data.fork.child_tgid = task->tgid;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        /* If cn_netlink_send() fails, the data is not sent */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exec_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ev->timestamp);
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
        ev->event_data.exec.process_tgid = task->tgid;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_id_connector(struct task_struct *task, int which_id)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        ev->what = which_id;
        ev->event_data.id.process_pid = task->pid;
        ev->event_data.id.process_tgid = task->tgid;
        if (which_id == PROC_EVENT_UID) {
                ev->event_data.id.r.ruid = task->uid;
                ev->event_data.id.e.euid = task->euid;
        } else if (which_id == PROC_EVENT_GID) {
                ev->event_data.id.r.rgid = task->gid;
                ev->event_data.id.e.egid = task->egid;
        } else
                return; /* only UID and GID changes are reported */
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ev->timestamp);

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exit_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ev->timestamp);
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
        ev->event_data.exit.process_tgid = task->tgid;
        ev->event_data.exit.exit_code = task->exit_code;
        ev->event_data.exit.exit_signal = task->exit_signal;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        msg->seq = rcvd_seq;
        ktime_get_ts(&ev->timestamp);
        ev->cpu = -1;
        ev->what = PROC_EVENT_NONE;
        ev->event_data.ack.err = err;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/**
 * cn_proc_mcast_ctl - handle PROC_CN_MCAST_LISTEN/IGNORE requests
 * @data: message sent from userspace via the connector
 *
 * Adjusts the listener count and acknowledges the request.
 */
static void cn_proc_mcast_ctl(void *data)
{
        struct cn_msg *msg = data;
        enum proc_cn_mcast_op *mc_op = NULL;
        int err = 0;

        if (msg->len != sizeof(*mc_op))
                return;

        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
                atomic_inc(&proc_event_num_listeners);
                break;
        case PROC_CN_MCAST_IGNORE:
                atomic_dec(&proc_event_num_listeners);
                break;
        default:
                err = EINVAL;
                break;
        }
        cn_proc_ack(err, msg->seq, msg->ack);
}

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
        int err;

        if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc",
                                   &cn_proc_mcast_ctl))) {
                printk(KERN_WARNING "cn_proc failed to register\n");
                return err;
        }
        return 0;
}

module_init(cn_proc_init);
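
/*
 * Userspace usage sketch (illustration only, not part of this module):
 * a minimal listener built against the exported <linux/connector.h> and
 * <linux/cn_proc.h> headers (plus <sys/socket.h>, <linux/netlink.h> and
 * <unistd.h>). It opens a NETLINK_CONNECTOR socket, joins the CN_IDX_PROC
 * group, and sends PROC_CN_MCAST_LISTEN so that proc_event_num_listeners
 * above becomes non-zero and events start flowing. Error handling is
 * omitted and the buffer layout is an assumption of this sketch, not a
 * kernel requirement.
 *
 *	int nl_sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl addr = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = CN_IDX_PROC,
 *		.nl_pid    = getpid(),
 *	};
 *	bind(nl_sock, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	char buf[NLMSG_SPACE(sizeof(struct cn_msg) +
 *			     sizeof(enum proc_cn_mcast_op))] = { 0 };
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *	struct cn_msg *cn = NLMSG_DATA(nlh);
 *	enum proc_cn_mcast_op *op = (enum proc_cn_mcast_op *)cn->data;
 *
 *	nlh->nlmsg_len  = NLMSG_LENGTH(sizeof(*cn) + sizeof(*op));
 *	nlh->nlmsg_type = NLMSG_DONE;
 *	nlh->nlmsg_pid  = getpid();
 *	cn->id.idx = CN_IDX_PROC;
 *	cn->id.val = CN_VAL_PROC;
 *	cn->len    = sizeof(*op);
 *	*op = PROC_CN_MCAST_LISTEN;
 *	send(nl_sock, nlh, nlh->nlmsg_len, 0);
 *
 * The kernel replies with a PROC_EVENT_NONE acknowledgement (see
 * cn_proc_ack() above) followed by fork/exec/id/exit events; each recv()
 * payload is a struct nlmsghdr wrapping a struct cn_msg whose data is a
 * struct proc_event. Sending PROC_CN_MCAST_IGNORE unsubscribes.
 */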