1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Management Component Transport Protocol (MCTP)
4 *
5 * Copyright (c) 2021 Code Construct
6 * Copyright (c) 2021 Google
7 */
8
9 #include <linux/compat.h>
10 #include <linux/if_arp.h>
11 #include <linux/net.h>
12 #include <linux/mctp.h>
13 #include <linux/module.h>
14 #include <linux/socket.h>
15
16 #include <net/mctp.h>
17 #include <net/mctpdevice.h>
18 #include <net/sock.h>
19
20 #define CREATE_TRACE_POINTS
21 #include <trace/events/mctp.h>
22
23 /* socket implementation */
24
25 static void mctp_sk_expire_keys(struct timer_list *timer);
26
mctp_release(struct socket * sock)27 static int mctp_release(struct socket *sock)
28 {
29 struct sock *sk = sock->sk;
30
31 if (sk) {
32 sock->sk = NULL;
33 sk->sk_prot->close(sk, 0);
34 }
35
36 return 0;
37 }
38
39 /* Generic sockaddr checks, padding checks only so far */
mctp_sockaddr_is_ok(const struct sockaddr_mctp * addr)40 static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
41 {
42 return !addr->__smctp_pad0 && !addr->__smctp_pad1;
43 }
44
mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext * addr)45 static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
46 {
47 return !addr->__smctp_pad0[0] &&
48 !addr->__smctp_pad0[1] &&
49 !addr->__smctp_pad0[2];
50 }
51
/* Bind a socket to a (network, EID, message-type) tuple, making it
 * eligible to receive matching inbound messages. Rebinding an
 * already-bound socket is not yet supported.
 */
static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sockaddr_mctp *smctp;
	int rc;

	if (addrlen < sizeof(*smctp))
		return -EINVAL;

	if (addr->sa_family != AF_MCTP)
		return -EAFNOSUPPORT;

	/* binding requires privilege */
	if (!capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* it's a valid sockaddr for MCTP, cast and do protocol checks */
	smctp = (struct sockaddr_mctp *)addr;

	if (!mctp_sockaddr_is_ok(smctp))
		return -EINVAL;

	lock_sock(sk);

	/* TODO: allow rebind */
	if (sk_hashed(sk)) {
		/* already on the bind list: second bind() rejected */
		rc = -EADDRINUSE;
		goto out_release;
	}
	msk->bind_net = smctp->smctp_network;
	msk->bind_addr = smctp->smctp_addr.s_addr;
	msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */

	/* add to the per-net bind list, making us visible for rx */
	rc = sk->sk_prot->hash(sk);

out_release:
	release_sock(sk);

	return rc;
}
92
/* Transmit one MCTP message. A destination sockaddr_mctp is required
 * (connect()ed operation is TODO). With MCTP_OPT_ADDR_EXT enabled and
 * an extended sockaddr supplied, the message is sent directly via the
 * named interface (rt == NULL path), bypassing the routing table.
 */
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	int rc, addrlen = msg->msg_namelen;
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct sk_buff *skb = NULL;
	int hlen;

	if (addr) {
		const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
			MCTP_TAG_PREALLOC;

		if (addrlen < sizeof(struct sockaddr_mctp))
			return -EINVAL;
		if (addr->smctp_family != AF_MCTP)
			return -EINVAL;
		if (!mctp_sockaddr_is_ok(addr))
			return -EINVAL;
		if (addr->smctp_tag & ~tagbits)
			return -EINVAL;
		/* can't preallocate a non-owned tag */
		if (addr->smctp_tag & MCTP_TAG_PREALLOC &&
		    !(addr->smctp_tag & MCTP_TAG_OWNER))
			return -EINVAL;

	} else {
		/* TODO: connect()ed sockets */
		return -EDESTADDRREQ;
	}

	if (!capable(CAP_NET_RAW))
		return -EACCES;

	/* resolve the "any" network to the current default net */
	if (addr->smctp_network == MCTP_NET_ANY)
		addr->smctp_network = mctp_default_net(sock_net(sk));

	/* direct addressing */
	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);
		struct net_device *dev;

		rc = -EINVAL;
		rcu_read_lock();
		/* dev is only valid within this RCU read section; we
		 * extract hlen here and look it up again later via
		 * cb->ifindex for the actual transmit
		 */
		dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
		/* check for correct halen */
		if (dev && extaddr->smctp_halen == dev->addr_len) {
			hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
			rc = 0;
		}
		rcu_read_unlock();
		if (rc)
			goto err_free;
		/* NULL route marks the direct-addressed path below */
		rt = NULL;
	} else {
		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
				       addr->smctp_addr.s_addr);
		if (!rt) {
			rc = -EHOSTUNREACH;
			goto err_free;
		}
		hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
	}

	/* +1 for the message-type byte prepended to the payload */
	skb = sock_alloc_send_skb(sk, hlen + 1 + len,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		return rc;

	skb_reserve(skb, hlen);

	/* set type as first byte in payload */
	*(u8 *)skb_put(skb, 1) = addr->smctp_type;

	rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
	if (rc < 0)
		goto err_free;

	/* set up cb */
	cb = __mctp_cb(skb);
	cb->net = addr->smctp_network;

	if (!rt) {
		/* fill extended address in cb */
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);

		if (!mctp_sockaddr_ext_is_ok(extaddr) ||
		    extaddr->smctp_halen > sizeof(cb->haddr)) {
			rc = -EINVAL;
			goto err_free;
		}

		cb->ifindex = extaddr->smctp_ifindex;
		/* smctp_halen is checked above */
		cb->halen = extaddr->smctp_halen;
		memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
	}

	/* hands off the skb; on success report the payload length */
	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
			       addr->smctp_tag);

	return rc ? : len;

err_free:
	/* skb may still be NULL here (pre-alloc failures); kfree_skb(NULL)
	 * is a no-op
	 */
	kfree_skb(skb);
	return rc;
}
204
/* Receive a single MCTP message. The first payload byte (the message
 * type) is stripped into smctp_type; the remainder is copied to the
 * user buffer. With MCTP_OPT_ADDR_EXT set, an extended sockaddr
 * carrying the link-layer source info is returned instead.
 */
static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sk_buff *skb;
	size_t msglen;
	u8 type;
	int rc;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	/* an empty skb has no type byte to extract; report zero length */
	if (!skb->len) {
		rc = 0;
		goto out_free;
	}

	/* extract message type, remove from data */
	type = *((u8 *)skb->data);
	msglen = skb->len - 1;

	if (len < msglen)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = msglen;

	/* copy payload, skipping the leading type byte (offset 1) */
	rc = skb_copy_datagram_msg(skb, 1, msg, len);
	if (rc < 0)
		goto out_free;

	sock_recv_cmsgs(msg, sk, skb);

	if (addr) {
		struct mctp_skb_cb *cb = mctp_cb(skb);
		/* TODO: expand mctp_skb_cb for header fields? */
		struct mctp_hdr *hdr = mctp_hdr(skb);

		addr = msg->msg_name;
		addr->smctp_family = AF_MCTP;
		addr->__smctp_pad0 = 0;
		addr->smctp_network = cb->net;
		addr->smctp_addr.s_addr = hdr->src;
		addr->smctp_type = type;
		/* report the tag together with the TO flag */
		addr->smctp_tag = hdr->flags_seq_tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
		addr->__smctp_pad1 = 0;
		msg->msg_namelen = sizeof(*addr);

		if (msk->addr_ext) {
			DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae,
					 msg->msg_name);
			msg->msg_namelen = sizeof(*ae);
			ae->smctp_ifindex = cb->ifindex;
			ae->smctp_halen = cb->halen;
			/* zero pad and unused haddr bytes so no kernel
			 * stack data leaks to userspace
			 */
			memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
			memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
			memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
		}
	}

	rc = len;

	/* MSG_TRUNC: return the full message length, not the possibly
	 * truncated copied length
	 */
	if (flags & MSG_TRUNC)
		rc = msglen;

out_free:
	skb_free_datagram(sk, skb);
	return rc;
}
280
/* We're done with the key; invalidate, stop reassembly, and remove from lists.
 *
 * Called with key->lock held (released here, per __releases) and
 * net->mctp.keys_lock held by the caller. Any in-progress reassembly
 * skb is detached under the key lock and freed only after the lock is
 * dropped.
 */
static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net,
			      unsigned long flags, unsigned long reason)
	__releases(&key->lock)
	__must_hold(&net->mctp.keys_lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	skb = key->reasm_head;
	key->reasm_head = NULL;
	key->reasm_dead = true;
	key->valid = false;
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	/* only unlink if still hashed; list membership holds one ref */
	if (!hlist_unhashed(&key->hlist)) {
		hlist_del_init(&key->hlist);
		hlist_del_init(&key->sklist);
		/* unref for the lists */
		mctp_key_unref(key);
	}

	kfree_skb(skb);
}
307
/* setsockopt(SOL_MCTP): MCTP_OPT_ADDR_EXT toggles extended
 * (link-layer) addressing on send/receive
 */
static int mctp_setsockopt(struct socket *sock, int level, int optname,
			   sockptr_t optval, unsigned int optlen)
{
	struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
	int val;

	if (level != SOL_MCTP)
		return -EINVAL;

	if (optname != MCTP_OPT_ADDR_EXT)
		return -ENOPROTOOPT;

	if (optlen != sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	msk->addr_ext = val;

	return 0;
}
328
/* getsockopt(SOL_MCTP): report the MCTP_OPT_ADDR_EXT setting */
static int mctp_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
	int len, val;

	if (level != SOL_MCTP)
		return -EINVAL;

	if (get_user(len, optlen))
		return -EFAULT;

	if (optname != MCTP_OPT_ADDR_EXT)
		return -EINVAL;

	if (len != sizeof(int))
		return -EINVAL;

	/* normalise to 0/1 */
	val = msk->addr_ext ? 1 : 0;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
352
353 /* helpers for reading/writing the tag ioc, handling compatibility across the
354 * two versions, and some basic API error checking
355 */
mctp_ioctl_tag_copy_from_user(unsigned long arg,struct mctp_ioc_tag_ctl2 * ctl,bool tagv2)356 static int mctp_ioctl_tag_copy_from_user(unsigned long arg,
357 struct mctp_ioc_tag_ctl2 *ctl,
358 bool tagv2)
359 {
360 struct mctp_ioc_tag_ctl ctl_compat;
361 unsigned long size;
362 void *ptr;
363 int rc;
364
365 if (tagv2) {
366 size = sizeof(*ctl);
367 ptr = ctl;
368 } else {
369 size = sizeof(ctl_compat);
370 ptr = &ctl_compat;
371 }
372
373 rc = copy_from_user(ptr, (void __user *)arg, size);
374 if (rc)
375 return -EFAULT;
376
377 if (!tagv2) {
378 /* compat, using defaults for new fields */
379 ctl->net = MCTP_INITIAL_DEFAULT_NET;
380 ctl->peer_addr = ctl_compat.peer_addr;
381 ctl->local_addr = MCTP_ADDR_ANY;
382 ctl->flags = ctl_compat.flags;
383 ctl->tag = ctl_compat.tag;
384 }
385
386 if (ctl->flags)
387 return -EINVAL;
388
389 if (ctl->local_addr != MCTP_ADDR_ANY &&
390 ctl->local_addr != MCTP_ADDR_NULL)
391 return -EINVAL;
392
393 return 0;
394 }
395
/* Write a tag-ctl result back to userspace, in either the v2 layout
 * or the v1 compat layout
 */
static int mctp_ioctl_tag_copy_to_user(unsigned long arg,
				       struct mctp_ioc_tag_ctl2 *ctl,
				       bool tagv2)
{
	struct mctp_ioc_tag_ctl ctl_compat;
	void __user *uarg = (void __user *)arg;
	unsigned long size;
	void *src;

	if (tagv2) {
		src = ctl;
		size = sizeof(*ctl);
	} else {
		/* v1 ioctl: only the original fields are reported */
		ctl_compat.peer_addr = ctl->peer_addr;
		ctl_compat.tag = ctl->tag;
		ctl_compat.flags = ctl->flags;

		src = &ctl_compat;
		size = sizeof(ctl_compat);
	}

	if (copy_to_user(uarg, src, size))
		return -EFAULT;

	return 0;
}
423
/* SIOCMCTPALLOCTAG{,2}: preallocate an owned local tag for the given
 * (net, peer) pair; the allocated tag (with OWNER|PREALLOC set) is
 * written back to userspace
 */
static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2,
			       unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *key = NULL;
	struct mctp_ioc_tag_ctl2 ctl;
	unsigned long flags;
	u8 tag;
	int rc;

	rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
	if (rc)
		return rc;

	/* tag is an output-only field; must be passed as zero */
	if (ctl.tag)
		return -EINVAL;

	key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY,
				   ctl.peer_addr, true, &tag);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
	rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2);
	if (rc) {
		unsigned long fl2;
		/* Unwind our key allocation: the keys list lock needs to be
		 * taken before the individual key locks, and we need a valid
		 * flags value (fl2) to pass to __mctp_key_remove, hence the
		 * second spin_lock_irqsave() rather than a plain spin_lock().
		 */
		spin_lock_irqsave(&net->mctp.keys_lock, flags);
		spin_lock_irqsave(&key->lock, fl2);
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
		mctp_key_unref(key);
		spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
		return rc;
	}

	/* drop the ref returned by mctp_alloc_local_tag(); the lists hold
	 * their own reference (see __mctp_key_remove)
	 */
	mctp_key_unref(key);
	return 0;
}
466
/* SIOCMCTPDROPTAG{,2}: release a tag previously obtained with
 * SIOCMCTPALLOCTAG{,2}. Returns -EINVAL if no matching manual
 * allocation is found on this socket.
 */
static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2,
			      unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_ioc_tag_ctl2 ctl;
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	int rc;
	u8 tag;

	rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
	if (rc)
		return rc;

	/* Must be a local tag, TO set, preallocated */
	if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
		return -EINVAL;

	tag = ctl.tag & MCTP_TAG_MASK;
	rc = -EINVAL;

	/* a NULL peer address matches tags allocated against ANY */
	if (ctl.peer_addr == MCTP_ADDR_NULL)
		ctl.peer_addr = MCTP_ADDR_ANY;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* we do an irqsave here, even though we know the irq state,
		 * so we have the flags to pass to __mctp_key_remove
		 */
		spin_lock_irqsave(&key->lock, fl2);
		if (key->manual_alloc &&
		    ctl.net == key->net &&
		    ctl.peer_addr == key->peer_addr &&
		    tag == key->tag) {
			/* __mctp_key_remove releases key->lock for us */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_DROPPED);
			rc = 0;
		} else {
			spin_unlock_irqrestore(&key->lock, fl2);
		}
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}
513
mctp_ioctl(struct socket * sock,unsigned int cmd,unsigned long arg)514 static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
515 {
516 struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
517 bool tagv2 = false;
518
519 switch (cmd) {
520 case SIOCMCTPALLOCTAG2:
521 case SIOCMCTPALLOCTAG:
522 tagv2 = cmd == SIOCMCTPALLOCTAG2;
523 return mctp_ioctl_alloctag(msk, tagv2, arg);
524 case SIOCMCTPDROPTAG:
525 case SIOCMCTPDROPTAG2:
526 tagv2 = cmd == SIOCMCTPDROPTAG2;
527 return mctp_ioctl_droptag(msk, tagv2, arg);
528 }
529
530 return -EINVAL;
531 }
532
#ifdef CONFIG_COMPAT
/* compat (32-bit userspace) ioctl entry point. Both the v1
 * mctp_ioc_tag_ctl and the v2 mctp_ioc_tag_ctl2 structs contain only
 * fixed-width types, so their layouts are identical under compat and
 * we can forward straight to mctp_ioctl() with a converted pointer.
 *
 * Previously only the v1 ioctls were forwarded here, leaving 32-bit
 * callers of SIOCMCTP{ALLOC,DROP}TAG2 with -ENOIOCTLCMD; the v2
 * commands are now handled too.
 */
static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd,
			     unsigned long arg)
{
	void __user *argp = compat_ptr(arg);

	switch (cmd) {
	/* These have compatible ptr layouts */
	case SIOCMCTPALLOCTAG:
	case SIOCMCTPDROPTAG:
	case SIOCMCTPALLOCTAG2:
	case SIOCMCTPDROPTAG2:
		return mctp_ioctl(sock, cmd, (unsigned long)argp);
	}

	return -ENOIOCTLCMD;
}
#endif
549
/* Socket-layer ops for MCTP datagram sockets. Only datagram semantics
 * are provided, so the connection-oriented entries are sock_no_*()
 * stubs.
 */
static const struct proto_ops mctp_dgram_ops = {
	.family = PF_MCTP,
	.release = mctp_release,
	.bind = mctp_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = mctp_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = mctp_setsockopt,
	.getsockopt = mctp_getsockopt,
	.sendmsg = mctp_sendmsg,
	.recvmsg = mctp_recvmsg,
	.mmap = sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mctp_compat_ioctl,
#endif
};
572
/* Timer callback: drop this socket's expired (non-manual) tag keys,
 * then re-arm the timer for the next-soonest expiry if any keys remain
 */
static void mctp_sk_expire_keys(struct timer_list *timer)
{
	struct mctp_sock *msk = container_of(timer, struct mctp_sock,
					     key_expiry);
	struct net *net = sock_net(&msk->sk);
	unsigned long next_expiry, flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	bool next_expiry_valid = false;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* don't expire. manual_alloc is immutable, no locking
		 * required.
		 */
		if (key->manual_alloc)
			continue;

		spin_lock_irqsave(&key->lock, fl2);
		if (!time_after_eq(key->expiry, jiffies)) {
			/* expired; __mctp_key_remove releases key->lock */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_TIMEOUT);
			continue;
		}

		/* still live: track the earliest upcoming expiry */
		if (next_expiry_valid) {
			if (time_before(key->expiry, next_expiry))
				next_expiry = key->expiry;
		} else {
			next_expiry = key->expiry;
			next_expiry_valid = true;
		}
		spin_unlock_irqrestore(&key->lock, fl2);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	if (next_expiry_valid)
		mod_timer(timer, next_expiry);
}
614
mctp_sk_init(struct sock * sk)615 static int mctp_sk_init(struct sock *sk)
616 {
617 struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
618
619 INIT_HLIST_HEAD(&msk->keys);
620 timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
621 return 0;
622 }
623
/* sk_prot->close, reached via mctp_release(). sk_common_release()
 * performs the generic teardown, which includes calling our unhash to
 * drop binds and tag keys.
 */
static void mctp_sk_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}
628
/* sk_prot->hash: add the socket to the per-net bind list, under the
 * bind_lock mutex; the list itself is RCU-traversed on receive
 */
static int mctp_sk_hash(struct sock *sk)
{
	struct net *net = sock_net(sk);

	mutex_lock(&net->mctp.bind_lock);
	sk_add_node_rcu(sk, &net->mctp.binds);
	mutex_unlock(&net->mctp.bind_lock);

	return 0;
}
639
/* sk_prot->unhash: full protocol-state teardown — remove from the bind
 * list, drop every tag key, and stop the key-expiry timer
 */
static void mctp_sk_unhash(struct sock *sk)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct net *net = sock_net(sk);
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;

	/* remove from any type-based binds */
	mutex_lock(&net->mctp.bind_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->mctp.bind_lock);

	/* remove tag allocations */
	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		spin_lock_irqsave(&key->lock, fl2);
		/* __mctp_key_remove releases key->lock for us */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
	}
	/* NOTE(review): SOCK_DEAD is set while still holding keys_lock —
	 * presumably to serialise against concurrent key additions;
	 * confirm against the key-add path
	 */
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	/* Since there are no more tag allocations (we have removed all of the
	 * keys), stop any pending expiry events. the timer cannot be re-queued
	 * as the sk is no longer observable
	 */
	del_timer_sync(&msk->key_expiry);
}
668
/* sk->sk_destruct: free any skbs still queued for receive at final
 * sock teardown
 */
static void mctp_sk_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
673
/* sk_prot for MCTP sockets; hash/unhash manage the per-net bind list
 * and the per-socket tag keys
 */
static struct proto mctp_proto = {
	.name = "MCTP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct mctp_sock),
	.init = mctp_sk_init,
	.close = mctp_sk_close,
	.hash = mctp_sk_hash,
	.unhash = mctp_sk_unhash,
};
683
/* socket(AF_MCTP, SOCK_DGRAM, 0) creation: allocate and initialise a
 * new MCTP sock. Only datagram type and protocol 0 are accepted.
 */
static int mctp_pf_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct proto *proto;
	struct sock *sk;
	int rc;

	if (protocol)
		return -EPROTONOSUPPORT;

	/* only datagram sockets are supported */
	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	proto = &mctp_proto;
	ops = &mctp_dgram_ops;

	sock->state = SS_UNCONNECTED;
	sock->ops = ops;

	sk = sk_alloc(net, PF_MCTP, GFP_KERNEL, proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_destruct = mctp_sk_destruct;

	rc = 0;
	if (sk->sk_prot->init)
		rc = sk->sk_prot->init(sk);

	if (rc)
		goto err_sk_put;

	return 0;

err_sk_put:
	/* detach the sk from the socket and drop our reference */
	sock_orphan(sk);
	sock_put(sk);
	return rc;
}
726
/* protocol-family entry for socket(AF_MCTP, ...) */
static struct net_proto_family mctp_pf = {
	.family = PF_MCTP,
	.create = mctp_pf_create,
	.owner = THIS_MODULE,
};
732
/* Register the MCTP protocol family and core proto, then bring up the
 * routing, neighbour and device subsystems; unwound in strict reverse
 * order on any failure
 */
static __init int mctp_init(void)
{
	int rc;

	/* ensure our uapi tag definitions match the header format */
	BUILD_BUG_ON(MCTP_TAG_OWNER != MCTP_HDR_FLAG_TO);
	BUILD_BUG_ON(MCTP_TAG_MASK != MCTP_HDR_TAG_MASK);

	pr_info("mctp: management component transport protocol core\n");

	rc = sock_register(&mctp_pf);
	if (rc)
		return rc;

	rc = proto_register(&mctp_proto, 0);
	if (rc)
		goto err_unreg_sock;

	rc = mctp_routes_init();
	if (rc)
		goto err_unreg_proto;

	rc = mctp_neigh_init();
	if (rc)
		goto err_unreg_routes;

	rc = mctp_device_init();
	if (rc)
		goto err_unreg_neigh;

	return 0;

	/* error unwind: labels fall through in reverse registration order */
err_unreg_neigh:
	mctp_neigh_exit();
err_unreg_routes:
	mctp_routes_exit();
err_unreg_proto:
	proto_unregister(&mctp_proto);
err_unreg_sock:
	sock_unregister(PF_MCTP);

	return rc;
}
776
/* module unload: tear down subsystems in the reverse order of
 * mctp_init()
 */
static __exit void mctp_exit(void)
{
	mctp_device_exit();
	mctp_neigh_exit();
	mctp_routes_exit();
	proto_unregister(&mctp_proto);
	sock_unregister(PF_MCTP);
}
785
/* registered via subsys_initcall (not module_init), so the core is
 * available early — presumably for MCTP drivers initialising later in
 * boot; confirm against in-tree driver init levels
 */
subsys_initcall(mctp_init);
module_exit(mctp_exit);

MODULE_DESCRIPTION("MCTP core");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");

MODULE_ALIAS_NETPROTO(PF_MCTP);
793