/*
 * 	connector.c
 *
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/mutex.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");

static u32 cn_idx = CN_IDX_CONNECTOR;
static u32 cn_val = CN_VAL_CONNECTOR;

module_param(cn_idx, uint, 0);
module_param(cn_val, uint, 0);
MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
MODULE_PARM_DESC(cn_val, "Connector's main device val.");

static DEFINE_MUTEX(notify_lock);
static LIST_HEAD(notify_list);

static struct cn_dev cdev;

int cn_already_initialized = 0;

/*
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it puts a locally unique sequence
 * number and a random acknowledge number into it.  The sequence
 * number may be copied into nlmsghdr->nlmsg_seq as well.
 *
 * The sequence number is incremented with each message sent.
 *
 * If we expect a reply to our message, then the sequence number in
 * the received message MUST be the same as in the original message,
 * and the acknowledge number MUST be the acknowledge number in the
 * original message + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the
 * one we are expecting, but its acknowledge number is not equal to
 * the acknowledge number in the original message + 1, then it is a
 * new message.
 */
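/*
 * Illustrative sketch (not used by this driver) of how a sender could
 * apply the rules above.  The names req, reply and send_seq are
 * hypothetical; only the seq/ack relationship is taken from the
 * description.
 *
 *	static u32 send_seq;
 *	struct cn_msg req, *reply;
 *
 *	req.seq = send_seq++;
 *	get_random_bytes(&req.ack, sizeof(req.ack));
 *	... fill req.id, req.len and the payload, then send it ...
 *
 *	... when a message arrives later ...
 *	if (reply->seq == req.seq && reply->ack == req.ack + 1)
 *		... this is the reply to req ...
 *	else
 *		... unrelated (new) message ...
 */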
int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (!__group) {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	} else {
		group = __group;
	}

	if (!netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = NLMSG_SPACE(sizeof(*msg) + msg->len);

	skb = alloc_skb(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));

	data = NLMSG_DATA(nlh);

	memcpy(data, msg, sizeof(*data) + msg->len);

	NETLINK_CB(skb).dst_group = group;

	return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);

nlmsg_failure:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cn_netlink_send);

/*
 * Callback helper - queues work and sets up the destructor for the
 * given data.
 */
static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
{
	struct cn_callback_entry *__cbq, *__new_cbq;
	struct cn_dev *dev = &cdev;
	int err = -ENODEV;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
			if (likely(!work_pending(&__cbq->work) &&
				   __cbq->data.ddata == NULL)) {
				__cbq->data.callback_priv = msg;

				__cbq->data.ddata = data;
				__cbq->data.destruct_data = destruct_data;

				if (queue_work(dev->cbdev->cn_queue,
					       &__cbq->work))
					err = 0;
			} else {
				struct cn_callback_data *d;

				err = -ENOMEM;
				__new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
				if (__new_cbq) {
					d = &__new_cbq->data;
					d->callback_priv = msg;
					d->callback = __cbq->data.callback;
					d->ddata = data;
					d->destruct_data = destruct_data;
					d->free = __new_cbq;

					INIT_WORK(&__new_cbq->work,
						  &cn_queue_wrapper);

					if (queue_work(dev->cbdev->cn_queue,
						       &__new_cbq->work))
						err = 0;
					else {
						kfree(__new_cbq);
						err = -EINVAL;
					}
				}
			}
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	return err;
}

/*
 * Skb receive helper - checks the skb and msg sizes and calls the
 * callback helper.
 */
static int __cn_rx_skb(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	u32 pid, uid, seq, group;
	struct cn_msg *msg;

	pid = NETLINK_CREDS(skb)->pid;
	uid = NETLINK_CREDS(skb)->uid;
	seq = nlh->nlmsg_seq;
	group = NETLINK_CB(skb).dst_group;
	msg = NLMSG_DATA(nlh);

	return cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
}
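/*
 * Illustrative sketch (not part of this driver) of the frame layout
 * that the receive path below expects.  A sender, e.g. in userspace,
 * typically builds the frame like this (buf and payload_len are
 * hypothetical names):
 *
 *	char buf[NLMSG_SPACE(sizeof(struct cn_msg) + payload_len)];
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *	struct cn_msg *m = NLMSG_DATA(nlh);
 *
 *	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*m) + payload_len);
 *	nlh->nlmsg_type = NLMSG_DONE;
 *	m->id.idx = ...;
 *	m->id.val = ...;
 *	m->len = payload_len;
 *	memcpy(m + 1, payload, payload_len);
 *
 * cn_rx_skb() below rejects frames whose nlmsg_len is smaller than
 * sizeof(struct cn_msg) or larger than CONNECTOR_MAX_MSG_SIZE.
 */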
/*
 * Main netlink receiving function.
 *
 * It checks the skb and netlink header sizes and calls the skb
 * receive helper with a shared skb.
 */
static void cn_rx_skb(struct sk_buff *__skb)
{
	struct nlmsghdr *nlh;
	u32 len;
	int err;
	struct sk_buff *skb;

	skb = skb_get(__skb);

	if (skb->len >= NLMSG_SPACE(0)) {
		nlh = nlmsg_hdr(skb);

		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
			kfree_skb(skb);
			goto out;
		}

		len = NLMSG_ALIGN(nlh->nlmsg_len);
		if (len > skb->len)
			len = skb->len;

		err = __cn_rx_skb(skb, nlh);
		if (err < 0)
			kfree_skb(skb);
	}

out:
	kfree_skb(__skb);
}

/*
 * Notification routing.
 *
 * Gets the id and checks whether there are notification requests for
 * its idx and val.  If there are such requests, notifies the
 * listeners with the given notify event.
 */
static void cn_notify(struct cb_id *id, u32 notify_event)
{
	struct cn_ctl_entry *ent;

	mutex_lock(&notify_lock);
	list_for_each_entry(ent, &notify_list, notify_entry) {
		int i;
		struct cn_notify_req *req;
		struct cn_ctl_msg *ctl = ent->msg;
		int idx_found, val_found;

		idx_found = val_found = 0;

		req = (struct cn_notify_req *)ctl->data;
		for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
			if (id->idx >= req->first &&
			    id->idx < req->first + req->range) {
				idx_found = 1;
				break;
			}
		}

		for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
			if (id->val >= req->first &&
			    id->val < req->first + req->range) {
				val_found = 1;
				break;
			}
		}

		if (idx_found && val_found) {
			struct cn_msg m = { .ack = notify_event, };

			memcpy(&m.id, id, sizeof(m.id));
			cn_netlink_send(&m, ctl->group, GFP_KERNEL);
		}
	}
	mutex_unlock(&notify_lock);
}

/*
 * Callback add routine - adds a callback with the given ID and name.
 * If a callback with the same ID is already registered, it will not
 * be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
{
	int err;
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
	if (err)
		return err;

	cn_notify(id, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * Callback remove routine - removes the callback with the given ID.
 * If there is no registered callback with the given ID, nothing
 * happens.
 *
 * May sleep while waiting for the reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
	cn_notify(id, 1);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
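/*
 * Illustrative sketch (not part of this driver) of how another kernel
 * module would use the interface exported above.  The id values and
 * the names my_id/my_callback are hypothetical.
 *
 *	static struct cb_id my_id = { .idx = 0x123, .val = 0x456 };
 *
 *	static void my_callback(void *data)
 *	{
 *		struct cn_msg *msg = data;
 *		... msg->data holds msg->len bytes from the sender ...
 *	}
 *
 *	err = cn_add_callback(&my_id, "my_module", my_callback);
 *	...
 *	cn_del_callback(&my_id);
 */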
/*
 * Compares two connector control messages.
 * Returns 1 if they are the same or if the first one is corrupted
 * (i.e. its notification counts do not match its length).
 */
static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
{
	int i;
	struct cn_notify_req *req1, *req2;

	if (m1->idx_notify_num != m2->idx_notify_num)
		return 0;

	if (m1->val_notify_num != m2->val_notify_num)
		return 0;

	if (m1->len != m2->len)
		return 0;

	if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
	    m1->len)
		return 1;

	req1 = (struct cn_notify_req *)m1->data;
	req2 = (struct cn_notify_req *)m2->data;

	for (i = 0; i < m1->idx_notify_num; ++i) {
		if (req1->first != req2->first || req1->range != req2->range)
			return 0;
		req1++;
		req2++;
	}

	for (i = 0; i < m1->val_notify_num; ++i) {
		if (req1->first != req2->first || req1->range != req2->range)
			return 0;
		req1++;
		req2++;
	}

	return 1;
}

/*
 * Main connector device's callback.
 *
 * Used for processing notification requests.
 */
static void cn_callback(void *data)
{
	struct cn_msg *msg = data;
	struct cn_ctl_msg *ctl;
	struct cn_ctl_entry *ent;
	u32 size;

	if (msg->len < sizeof(*ctl))
		return;

	ctl = (struct cn_ctl_msg *)msg->data;

	size = (sizeof(*ctl) + ((ctl->idx_notify_num +
				 ctl->val_notify_num) *
				sizeof(struct cn_notify_req)));

	if (msg->len != size)
		return;

	if (ctl->len + sizeof(*ctl) != msg->len)
		return;

	/*
	 * Remove notification.
	 */
	if (ctl->group == 0) {
		struct cn_ctl_entry *n;

		mutex_lock(&notify_lock);
		list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
			if (cn_ctl_msg_equals(ent->msg, ctl)) {
				list_del(&ent->notify_entry);
				kfree(ent);
			}
		}
		mutex_unlock(&notify_lock);

		return;
	}

	size += sizeof(*ent);

	ent = kzalloc(size, GFP_KERNEL);
	if (!ent)
		return;

	ent->msg = (struct cn_ctl_msg *)(ent + 1);

	memcpy(ent->msg, ctl, size - sizeof(*ent));

	mutex_lock(&notify_lock);
	list_add(&ent->notify_entry, &notify_list);
	mutex_unlock(&notify_lock);
}

static int __devinit cn_init(void)
{
	struct cn_dev *dev = &cdev;
	int err;

	dev->input = cn_rx_skb;
	dev->id.idx = cn_idx;
	dev->id.val = cn_val;

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
					 CN_NETLINK_USERS + 0xf,
					 dev->input, NULL, THIS_MODULE);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		if (dev->nls->sk_socket)
			sock_release(dev->nls->sk_socket);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	err = cn_add_callback(&dev->id, "connector", &cn_callback);
	if (err) {
		cn_already_initialized = 0;
		cn_queue_free_dev(dev->cbdev);
		if (dev->nls->sk_socket)
			sock_release(dev->nls->sk_socket);
		return -EINVAL;
	}

	return 0;
}

static void __devexit cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	cn_del_callback(&dev->id);
	cn_queue_free_dev(dev->cbdev);
	if (dev->nls->sk_socket)
		sock_release(dev->nls->sk_socket);
}

subsys_initcall(cn_init);
module_exit(cn_fini);
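/*
 * Illustrative sketch (not part of this driver) of the control message
 * that cn_callback() above accepts when a listener subscribes to
 * notifications.  The payload of the cn_msg sent to the connector's
 * own id is a struct cn_ctl_msg followed by idx_notify_num +
 * val_notify_num struct cn_notify_req entries, idx ranges first, then
 * val ranges:
 *
 *	struct cn_ctl_msg *ctl = ...;
 *	struct cn_notify_req *req = (struct cn_notify_req *)ctl->data;
 *
 *	ctl->idx_notify_num = 1;
 *	ctl->val_notify_num = 1;
 *	ctl->group = <netlink group to notify>;
 *	ctl->len = 2 * sizeof(struct cn_notify_req);
 *	req[0].first = <first idx>;  req[0].range = <number of idx values>;
 *	req[1].first = <first val>;  req[1].range = <number of val values>;
 *
 * Sending the same ranges with ctl->group == 0 removes the
 * registration again.
 */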