Lines Matching +full:mctp +full:- +full:handling (net/mctp/route.c)

1 // SPDX-License-Identifier: GPL-2.0
3 * Management Component Transport Protocol (MCTP) - routing
15 #include <linux/mctp.h>
22 #include <net/mctp.h>
27 #include <trace/events/mctp.h>
50 /* TODO: look up in skb->cb? */ in mctp_lookup_bind()
56 type = (*(u8 *)skb->data) & 0x7f; in mctp_lookup_bind()
58 sk_for_each_rcu(sk, &net->mctp.binds) { in mctp_lookup_bind()
61 if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net) in mctp_lookup_bind()
64 if (msk->bind_type != type) in mctp_lookup_bind()
67 if (!mctp_address_matches(msk->bind_addr, mh->dest)) in mctp_lookup_bind()
78 * struct net->mctp.keys contains our set of currently-allocated keys for
79 * MCTP tag management. The lookup tuple for these is the peer EID,
80 * local EID and MCTP tag.
85 * a key with (local = local-eid, peer = ANY). This allows a match on the
91 * - when a packet is sent, with a locally-owned tag: we need to find an
94 * - when a tag is manually allocated: we need to find an unused tag value
98 * (local = ANY, peer = peer-eid).
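
The comment block above describes the (net, local EID, peer EID, tag) tuple used to look up keys, with an ANY address acting as a wildcard on either side. The sketch below is an illustrative userspace model of that matching rule, not the kernel's mctp_key_match(); it assumes MCTP_ADDR_ANY is the all-ones EID, as in the MCTP uapi header.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t mctp_eid_t;
#define MCTP_ADDR_ANY 0xff	/* wildcard EID (assumed, per uapi header) */

struct tag_key {
	unsigned int net;	/* MCTP network ID */
	mctp_eid_t local;	/* local EID, or ANY */
	mctp_eid_t peer;	/* peer EID, or ANY */
	uint8_t tag;		/* 3-bit tag value plus the TO bit */
};

/* ANY in the key matches any concrete address */
static bool eid_match(mctp_eid_t key_eid, mctp_eid_t eid)
{
	return key_eid == MCTP_ADDR_ANY || key_eid == eid;
}

static bool key_match(const struct tag_key *k, unsigned int net,
		      mctp_eid_t local, mctp_eid_t peer, uint8_t tag)
{
	return k->net == net && eid_match(k->local, local) &&
	       eid_match(k->peer, peer) && k->tag == tag;
}

int main(void)
{
	/* a bind()-style key: any peer, fixed local EID and tag */
	struct tag_key k = { .net = 1, .local = 8,
			     .peer = MCTP_ADDR_ANY, .tag = 0x2 };

	printf("%d\n", key_match(&k, 1, 8, 9, 0x2));	/* 1: peer wildcard */
	printf("%d\n", key_match(&k, 1, 8, 9, 0x3));	/* 0: tag mismatch */
	return 0;
}
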
113 if (key->net != net) in mctp_key_match()
116 if (!mctp_address_matches(key->local_addr, local)) in mctp_key_match()
119 if (!mctp_address_matches(key->peer_addr, peer)) in mctp_key_match()
122 if (key->tag != tag) in mctp_key_match()
128 /* returns a key (with key->lock held, and refcounted), or NULL if no such
134 __acquires(&key->lock) in mctp_lookup_key()
142 tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO); in mctp_lookup_key()
145 spin_lock_irqsave(&net->mctp.keys_lock, flags); in mctp_lookup_key()
147 hlist_for_each_entry(key, &net->mctp.keys, hlist) { in mctp_lookup_key()
148 if (!mctp_key_match(key, netid, mh->dest, peer, tag)) in mctp_lookup_key()
151 spin_lock(&key->lock); in mctp_lookup_key()
152 if (key->valid) { in mctp_lookup_key()
153 refcount_inc(&key->refs); in mctp_lookup_key()
157 spin_unlock(&key->lock); in mctp_lookup_key()
161 spin_unlock(&net->mctp.keys_lock); in mctp_lookup_key()
164 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); in mctp_lookup_key()
181 key->net = net; in mctp_key_alloc()
182 key->peer_addr = peer; in mctp_key_alloc()
183 key->local_addr = local; in mctp_key_alloc()
184 key->tag = tag; in mctp_key_alloc()
185 key->sk = &msk->sk; in mctp_key_alloc()
186 key->valid = true; in mctp_key_alloc()
187 spin_lock_init(&key->lock); in mctp_key_alloc()
188 refcount_set(&key->refs, 1); in mctp_key_alloc()
189 sock_hold(key->sk); in mctp_key_alloc()
198 if (!refcount_dec_and_test(&key->refs)) in mctp_key_unref()
204 spin_lock_irqsave(&key->lock, flags); in mctp_key_unref()
205 mctp_dev_release_key(key->dev, key); in mctp_key_unref()
206 spin_unlock_irqrestore(&key->lock, flags); in mctp_key_unref()
208 sock_put(key->sk); in mctp_key_unref()
214 struct net *net = sock_net(&msk->sk); in mctp_key_add()
219 spin_lock_irqsave(&net->mctp.keys_lock, flags); in mctp_key_add()
221 if (sock_flag(&msk->sk, SOCK_DEAD)) { in mctp_key_add()
222 rc = -EINVAL; in mctp_key_add()
226 hlist_for_each_entry(tmp, &net->mctp.keys, hlist) { in mctp_key_add()
227 if (mctp_key_match(tmp, key->net, key->local_addr, in mctp_key_add()
228 key->peer_addr, key->tag)) { in mctp_key_add()
229 spin_lock(&tmp->lock); in mctp_key_add()
230 if (tmp->valid) in mctp_key_add()
231 rc = -EEXIST; in mctp_key_add()
232 spin_unlock(&tmp->lock); in mctp_key_add()
239 refcount_inc(&key->refs); in mctp_key_add()
240 key->expiry = jiffies + mctp_key_lifetime; in mctp_key_add()
241 timer_reduce(&msk->key_expiry, key->expiry); in mctp_key_add()
243 hlist_add_head(&key->hlist, &net->mctp.keys); in mctp_key_add()
244 hlist_add_head(&key->sklist, &msk->keys); in mctp_key_add()
248 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); in mctp_key_add()
261 __releases(&key->lock) in __mctp_key_done_in()
266 skb = key->reasm_head; in __mctp_key_done_in()
267 key->reasm_head = NULL; in __mctp_key_done_in()
269 if (!key->manual_alloc) { in __mctp_key_done_in()
270 key->reasm_dead = true; in __mctp_key_done_in()
271 key->valid = false; in __mctp_key_done_in()
272 mctp_dev_release_key(key->dev, key); in __mctp_key_done_in()
274 spin_unlock_irqrestore(&key->lock, flags); in __mctp_key_done_in()
276 if (!key->manual_alloc) { in __mctp_key_done_in()
277 spin_lock_irqsave(&net->mctp.keys_lock, flags); in __mctp_key_done_in()
278 if (!hlist_unhashed(&key->hlist)) { in __mctp_key_done_in()
279 hlist_del_init(&key->hlist); in __mctp_key_done_in()
280 hlist_del_init(&key->sklist); in __mctp_key_done_in()
283 spin_unlock_irqrestore(&net->mctp.keys_lock, flags); in __mctp_key_done_in()
301 refcount_inc(&key->refs); in mctp_skb_set_flow()
302 flow->key = key; in mctp_skb_set_flow()
314 key = flow->key; in mctp_flow_prepare_output()
316 if (WARN_ON(key->dev && key->dev != dev)) in mctp_flow_prepare_output()
331 this_seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) in mctp_frag_queue()
334 if (!key->reasm_head) { in mctp_frag_queue()
335 key->reasm_head = skb; in mctp_frag_queue()
336 key->reasm_tailp = &(skb_shinfo(skb)->frag_list); in mctp_frag_queue()
337 key->last_seq = this_seq; in mctp_frag_queue()
341 exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK; in mctp_frag_queue()
344 return -EINVAL; in mctp_frag_queue()
346 if (key->reasm_head->len + skb->len > mctp_message_maxlen) in mctp_frag_queue()
347 return -EINVAL; in mctp_frag_queue()
349 skb->next = NULL; in mctp_frag_queue()
350 skb->sk = NULL; in mctp_frag_queue()
351 *key->reasm_tailp = skb; in mctp_frag_queue()
352 key->reasm_tailp = &skb->next; in mctp_frag_queue()
354 key->last_seq = this_seq; in mctp_frag_queue()
356 key->reasm_head->data_len += skb->len; in mctp_frag_queue()
357 key->reasm_head->len += skb->len; in mctp_frag_queue()
358 key->reasm_head->truesize += skb->truesize; in mctp_frag_queue()
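
mctp_frag_queue() above only appends a fragment when its 2-bit sequence number is exactly one greater (mod 4) than that of the last queued fragment; anything else aborts reassembly with -EINVAL. A standalone sketch of the wraparound check, assuming the 2-bit SEQ field from the MCTP header definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK 0x3	/* MCTP sequence numbers are 2 bits wide */

/* true if this_seq is the in-order successor of last_seq */
static bool seq_in_order(uint8_t last_seq, uint8_t this_seq)
{
	uint8_t exp_seq = (last_seq + 1) & SEQ_MASK;

	return this_seq == exp_seq;
}

int main(void)
{
	/* the sequence wraps 0, 1, 2, 3, 0, ... */
	printf("%d\n", seq_in_order(3, 0));	/* 1: valid wraparound */
	printf("%d\n", seq_in_order(1, 3));	/* 0: out of order, dropped */
	return 0;
}
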
366 struct net *net = dev_net(skb->dev); in mctp_route_input()
375 rc = -EINVAL; in mctp_route_input()
377 /* We may be receiving a locally-routed packet; drop source sk in mctp_route_input()
380 * From here, we will either queue the skb - either to a frag_queue, or in mctp_route_input()
382 * a non-NULL skb on exit will be otherwise unowned, and hence in mctp_route_input()
383 * kfree_skb()-ed. in mctp_route_input()
388 if (skb->len < sizeof(struct mctp_hdr) + 1) in mctp_route_input()
393 netid = mctp_cb(skb)->net; in mctp_route_input()
396 if (mh->ver != 1) in mctp_route_input()
399 flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM); in mctp_route_input()
400 tag = mh->flags_seq_tag & (MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO); in mctp_route_input()
405 * we hold a ref on the key, and key->lock held. in mctp_route_input()
407 key = mctp_lookup_key(net, skb, netid, mh->src, &f); in mctp_route_input()
411 msk = container_of(key->sk, struct mctp_sock, sk); in mctp_route_input()
415 * key for reassembly - we'll create a more specific in mctp_route_input()
418 * this lookup requires key->peer to be MCTP_ADDR_ANY, in mctp_route_input()
419 * it doesn't match just any key->peer. in mctp_route_input()
424 msk = container_of(any_key->sk, in mctp_route_input()
426 spin_unlock_irqrestore(&any_key->lock, f); in mctp_route_input()
434 rc = -ENOENT; in mctp_route_input()
438 /* single-packet message? deliver to socket, clean up any in mctp_route_input()
442 rc = sock_queue_rcv_skb(&msk->sk, skb); in mctp_route_input()
456 /* broadcast response or a bind() - create a key for further in mctp_route_input()
460 key = mctp_key_alloc(msk, netid, mh->dest, mh->src, in mctp_route_input()
463 rc = -ENOMEM; in mctp_route_input()
484 /* we don't need to release key->lock on exit, so in mctp_route_input()
492 if (key->reasm_head || key->reasm_dead) { in mctp_route_input()
496 rc = -EEXIST; in mctp_route_input()
507 * using the message-specific key in mctp_route_input()
511 if (!key->reasm_head) in mctp_route_input()
512 rc = -EINVAL; in mctp_route_input()
526 rc = sock_queue_rcv_skb(key->sk, key->reasm_head); in mctp_route_input()
528 key->reasm_head = NULL; in mctp_route_input()
535 rc = -ENOENT; in mctp_route_input()
541 spin_unlock_irqrestore(&key->lock, f); in mctp_route_input()
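
mctp_route_input() above extracts the SOM/EOM flags, the tag and the TO bit from the single flags_seq_tag byte of the MCTP header. The sketch below decodes that byte using the bit positions from the kernel's header definitions (SOM bit 7, EOM bit 6, SEQ bits 5-4, TO bit 3, tag bits 2-0); treat the exact masks as assumptions if working from other headers.

#include <stdint.h>
#include <stdio.h>

#define HDR_FLAG_SOM	(1u << 7)
#define HDR_FLAG_EOM	(1u << 6)
#define HDR_SEQ_SHIFT	4
#define HDR_SEQ_MASK	0x3
#define HDR_FLAG_TO	(1u << 3)
#define HDR_TAG_MASK	0x7

int main(void)
{
	uint8_t flags_seq_tag = 0xca;	/* SOM | EOM, seq 0, TO set, tag 2 */

	printf("som=%d eom=%d seq=%d to=%d tag=%d\n",
	       !!(flags_seq_tag & HDR_FLAG_SOM),
	       !!(flags_seq_tag & HDR_FLAG_EOM),
	       (flags_seq_tag >> HDR_SEQ_SHIFT) & HDR_SEQ_MASK,
	       !!(flags_seq_tag & HDR_FLAG_TO),
	       flags_seq_tag & HDR_TAG_MASK);
	return 0;
}
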
553 return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu); in mctp_route_mtu()
565 skb->protocol = htons(ETH_P_MCTP); in mctp_route_output()
567 mtu = READ_ONCE(skb->dev->mtu); in mctp_route_output()
568 if (skb->len > mtu) { in mctp_route_output()
570 return -EMSGSIZE; in mctp_route_output()
573 if (cb->ifindex) { in mctp_route_output()
575 if (cb->halen != skb->dev->addr_len) { in mctp_route_output()
578 return -EMSGSIZE; in mctp_route_output()
580 daddr = cb->haddr; in mctp_route_output()
583 if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0) in mctp_route_output()
587 rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol), in mctp_route_output()
588 daddr, skb->dev->dev_addr, skb->len); in mctp_route_output()
591 return -EHOSTUNREACH; in mctp_route_output()
594 mctp_flow_prepare_output(skb, route->dev); in mctp_route_output()
606 if (refcount_dec_and_test(&rt->refs)) { in mctp_route_release()
607 mctp_dev_put(rt->dev); in mctp_route_release()
621 INIT_LIST_HEAD(&rt->list); in mctp_route_alloc()
622 refcount_set(&rt->refs, 1); in mctp_route_alloc()
623 rt->output = mctp_route_discard; in mctp_route_alloc()
630 return READ_ONCE(net->mctp.default_net); in mctp_default_net()
636 return -EINVAL; in mctp_default_net_set()
637 WRITE_ONCE(net->mctp.default_net, index); in mctp_default_net_set()
645 struct netns_mctp *mns = &net->mctp; in mctp_reserve_tag()
647 lockdep_assert_held(&mns->keys_lock); in mctp_reserve_tag()
649 key->expiry = jiffies + mctp_key_lifetime; in mctp_reserve_tag()
650 timer_reduce(&msk->key_expiry, key->expiry); in mctp_reserve_tag()
652 /* we hold the net->key_lock here, allowing updates to both in mctp_reserve_tag()
655 hlist_add_head_rcu(&key->hlist, &mns->keys); in mctp_reserve_tag()
656 hlist_add_head_rcu(&key->sklist, &msk->keys); in mctp_reserve_tag()
657 refcount_inc(&key->refs); in mctp_reserve_tag()
660 /* Allocate a locally-owned tag value for (local, peer), and reserve
668 struct net *net = sock_net(&msk->sk); in mctp_alloc_local_tag()
669 struct netns_mctp *mns = &net->mctp; in mctp_alloc_local_tag()
681 return ERR_PTR(-ENOMEM); in mctp_alloc_local_tag()
686 spin_lock_irqsave(&mns->keys_lock, flags); in mctp_alloc_local_tag()
691 hlist_for_each_entry(tmp, &mns->keys, hlist) { in mctp_alloc_local_tag()
696 /* tags are net-specific */ in mctp_alloc_local_tag()
697 if (tmp->net != netid) in mctp_alloc_local_tag()
701 if (tmp->tag & MCTP_HDR_FLAG_TO) in mctp_alloc_local_tag()
709 !mctp_address_matches(tmp->peer_addr, peer)) in mctp_alloc_local_tag()
713 !mctp_address_matches(tmp->local_addr, local)) in mctp_alloc_local_tag()
716 spin_lock(&tmp->lock); in mctp_alloc_local_tag()
720 if (tmp->valid) in mctp_alloc_local_tag()
721 tagbits &= ~(1 << tmp->tag); in mctp_alloc_local_tag()
722 spin_unlock(&tmp->lock); in mctp_alloc_local_tag()
729 key->tag = __ffs(tagbits); in mctp_alloc_local_tag()
733 key->manual_alloc = manual; in mctp_alloc_local_tag()
734 *tagp = key->tag; in mctp_alloc_local_tag()
737 spin_unlock_irqrestore(&mns->keys_lock, flags); in mctp_alloc_local_tag()
741 return ERR_PTR(-EBUSY); in mctp_alloc_local_tag()
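
mctp_alloc_local_tag() above clears every in-use tag for the (net, local, peer) pair out of an 8-bit candidate mask and takes the lowest remaining bit with __ffs(), failing with -EBUSY when all eight 3-bit tag values are busy. A userspace sketch of that selection, using __builtin_ctz() in place of __ffs():

#include <stdint.h>
#include <stdio.h>

#define TAG_COUNT 8	/* the MCTP tag field is 3 bits wide */

/* return the lowest tag value not present in used[], or -1 if none is free */
static int alloc_tag(const uint8_t *used, int n_used)
{
	uint8_t tagbits = 0xff;		/* candidate mask: all 8 tags free */
	int i;

	for (i = 0; i < n_used; i++)
		tagbits &= ~(1u << (used[i] % TAG_COUNT));

	if (!tagbits)
		return -1;		/* the kernel returns -EBUSY here */

	return __builtin_ctz(tagbits);	/* lowest set bit, like __ffs() */
}

int main(void)
{
	uint8_t used[] = { 0, 1, 3 };

	printf("allocated tag %d\n", alloc_tag(used, 3));	/* 2 */
	return 0;
}
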
752 struct net *net = sock_net(&msk->sk); in mctp_lookup_prealloc_tag()
753 struct netns_mctp *mns = &net->mctp; in mctp_lookup_prealloc_tag()
760 spin_lock_irqsave(&mns->keys_lock, flags); in mctp_lookup_prealloc_tag()
762 hlist_for_each_entry(tmp, &mns->keys, hlist) { in mctp_lookup_prealloc_tag()
763 if (tmp->net != netid) in mctp_lookup_prealloc_tag()
766 if (tmp->tag != req_tag) in mctp_lookup_prealloc_tag()
769 if (!mctp_address_matches(tmp->peer_addr, daddr)) in mctp_lookup_prealloc_tag()
772 if (!tmp->manual_alloc) in mctp_lookup_prealloc_tag()
775 spin_lock(&tmp->lock); in mctp_lookup_prealloc_tag()
776 if (tmp->valid) { in mctp_lookup_prealloc_tag()
778 refcount_inc(&key->refs); in mctp_lookup_prealloc_tag()
779 spin_unlock(&tmp->lock); in mctp_lookup_prealloc_tag()
782 spin_unlock(&tmp->lock); in mctp_lookup_prealloc_tag()
784 spin_unlock_irqrestore(&mns->keys_lock, flags); in mctp_lookup_prealloc_tag()
787 return ERR_PTR(-ENOENT); in mctp_lookup_prealloc_tag()
790 *tagp = key->tag; in mctp_lookup_prealloc_tag()
799 return READ_ONCE(rt->dev->net) == net && in mctp_rt_match_eid()
800 rt->min <= eid && rt->max >= eid; in mctp_rt_match_eid()
808 return rt1->dev->net == rt2->dev->net && in mctp_rt_compare_exact()
809 rt1->min == rt2->min && in mctp_rt_compare_exact()
810 rt1->max == rt2->max; in mctp_rt_compare_exact()
820 list_for_each_entry_rcu(tmp, &net->mctp.routes, list) { in mctp_route_lookup()
823 if (refcount_inc_not_zero(&tmp->refs)) { in mctp_route_lookup()
842 list_for_each_entry_rcu(tmp, &net->mctp.routes, list) { in mctp_route_lookup_null()
843 if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL && in mctp_route_lookup_null()
844 refcount_inc_not_zero(&tmp->refs)) { in mctp_route_lookup_null()
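
mctp_route_lookup() above picks the first route whose inclusive [min, max] EID range covers the destination on the right network, and whose refcount can still be raised. A minimal sketch of the range test and first-match walk, leaving out the RCU and refcounting details:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint8_t mctp_eid_t;

struct route {
	unsigned int net;	/* MCTP network this route belongs to */
	mctp_eid_t min, max;	/* inclusive destination EID range */
};

bool route_matches(const struct route *rt, unsigned int net, mctp_eid_t eid)
{
	return rt->net == net && rt->min <= eid && rt->max >= eid;
}

const struct route *route_lookup(const struct route *tbl, size_t n,
				 unsigned int net, mctp_eid_t eid)
{
	for (size_t i = 0; i < n; i++)
		if (route_matches(&tbl[i], net, eid))
			return &tbl[i];

	return NULL;	/* no route: the caller drops the packet */
}
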
871 return -EMSGSIZE; in mctp_do_fragment_route()
880 for (pos = 0; pos < skb->len;) { in mctp_do_fragment_route()
882 size = min(mtu - hlen, skb->len - pos); in mctp_do_fragment_route()
886 rc = -ENOMEM; in mctp_do_fragment_route()
891 skb2->protocol = skb->protocol; in mctp_do_fragment_route()
892 skb2->priority = skb->priority; in mctp_do_fragment_route()
893 skb2->dev = skb->dev; in mctp_do_fragment_route()
894 memcpy(skb2->cb, skb->cb, sizeof(skb2->cb)); in mctp_do_fragment_route()
896 if (skb->sk) in mctp_do_fragment_route()
897 skb_set_owner_w(skb2, skb->sk); in mctp_do_fragment_route()
903 skb2->transport_header = skb2->network_header + hlen; in mctp_do_fragment_route()
907 hdr2->ver = hdr->ver; in mctp_do_fragment_route()
908 hdr2->dest = hdr->dest; in mctp_do_fragment_route()
909 hdr2->src = hdr->src; in mctp_do_fragment_route()
910 hdr2->flags_seq_tag = tag & in mctp_do_fragment_route()
914 hdr2->flags_seq_tag |= MCTP_HDR_FLAG_SOM; in mctp_do_fragment_route()
916 if (pos + size == skb->len) in mctp_do_fragment_route()
917 hdr2->flags_seq_tag |= MCTP_HDR_FLAG_EOM; in mctp_do_fragment_route()
919 hdr2->flags_seq_tag |= seq << MCTP_HDR_SEQ_SHIFT; in mctp_do_fragment_route()
924 /* we need to copy the extensions, for MCTP flow data */ in mctp_do_fragment_route()
928 rc = rt->output(rt, skb2); in mctp_do_fragment_route()
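
mctp_do_fragment_route() above splits the payload into (mtu - hlen)-sized pieces, setting SOM on the first fragment, EOM on the last, and stepping the 2-bit sequence number between them. A standalone sketch of that split, assuming the 4-byte MCTP header:

#include <stdio.h>

#define HDR_LEN 4	/* sizeof(struct mctp_hdr): ver, dest, src, flags_seq_tag */
#define SEQ_MASK 0x3

int main(void)
{
	unsigned int mtu = 68, payload = 200;
	unsigned int frag_payload = mtu - HDR_LEN;	/* 64 data bytes per fragment */
	unsigned int pos = 0, seq = 0;

	while (pos < payload) {
		unsigned int left = payload - pos;
		unsigned int size = left < frag_payload ? left : frag_payload;
		int som = (pos == 0);
		int eom = (pos + size == payload);

		printf("frag seq=%u len=%u som=%d eom=%d\n", seq, size, som, eom);
		pos += size;
		seq = (seq + 1) & SEQ_MASK;
	}
	return 0;
}
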
956 rc = -ENODEV; in mctp_local_output()
960 if (WARN_ON(!rt->dev)) in mctp_local_output()
963 } else if (cb->ifindex) { in mctp_local_output()
970 dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex); in mctp_local_output()
975 rt->dev = __mctp_dev_get(dev); in mctp_local_output()
978 if (!rt->dev) in mctp_local_output()
981 /* establish temporary route - we set up enough to keep in mctp_local_output()
984 rt->output = mctp_route_output; in mctp_local_output()
985 rt->mtu = 0; in mctp_local_output()
988 rc = -EINVAL; in mctp_local_output()
992 spin_lock_irqsave(&rt->dev->addrs_lock, flags); in mctp_local_output()
993 if (rt->dev->num_addrs == 0) { in mctp_local_output()
994 rc = -EHOSTUNREACH; in mctp_local_output()
997 saddr = rt->dev->addrs[0]; in mctp_local_output()
1000 spin_unlock_irqrestore(&rt->dev->addrs_lock, flags); in mctp_local_output()
1001 netid = READ_ONCE(rt->dev->net); in mctp_local_output()
1027 skb->protocol = htons(ETH_P_MCTP); in mctp_local_output()
1028 skb->priority = 0; in mctp_local_output()
1032 skb->dev = rt->dev->dev; in mctp_local_output()
1034 /* cb->net will have been set on initial ingress */ in mctp_local_output()
1035 cb->src = saddr; in mctp_local_output()
1039 hdr->ver = 1; in mctp_local_output()
1040 hdr->dest = daddr; in mctp_local_output()
1041 hdr->src = saddr; in mctp_local_output()
1045 if (skb->len + sizeof(struct mctp_hdr) <= mtu) { in mctp_local_output()
1046 hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM | in mctp_local_output()
1048 rc = rt->output(rt, skb); in mctp_local_output()
1073 struct net *net = dev_net(mdev->dev); in mctp_route_add()
1077 return -EINVAL; in mctp_route_add()
1080 return -EINVAL; in mctp_route_add()
1090 return -EINVAL; in mctp_route_add()
1095 return -ENOMEM; in mctp_route_add()
1097 rt->min = daddr_start; in mctp_route_add()
1098 rt->max = daddr_start + daddr_extent; in mctp_route_add()
1099 rt->mtu = mtu; in mctp_route_add()
1100 rt->dev = mdev; in mctp_route_add()
1101 mctp_dev_hold(rt->dev); in mctp_route_add()
1102 rt->type = type; in mctp_route_add()
1103 rt->output = rtfn; in mctp_route_add()
1107 list_for_each_entry(ert, &net->mctp.routes, list) { in mctp_route_add()
1110 return -EEXIST; in mctp_route_add()
1114 list_add_rcu(&rt->list, &net->mctp.routes); in mctp_route_add()
1122 struct net *net = dev_net(mdev->dev); in mctp_route_remove()
1128 return -EINVAL; in mctp_route_remove()
1135 list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) { in mctp_route_remove()
1136 if (rt->dev == mdev && in mctp_route_remove()
1137 rt->min == daddr_start && rt->max == daddr_end && in mctp_route_remove()
1138 rt->type == type) { in mctp_route_remove()
1139 list_del_rcu(&rt->list); in mctp_route_remove()
1146 return dropped ? 0 : -ENOENT; in mctp_route_remove()
1162 struct net *net = dev_net(mdev->dev); in mctp_route_remove_dev()
1166 list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) { in mctp_route_remove_dev()
1167 if (rt->dev == mdev) { in mctp_route_remove_dev()
1168 list_del_rcu(&rt->list); in mctp_route_remove_dev()
1175 /* Incoming packet-handling */
1191 /* basic non-data sanity checks */ in mctp_pkttype_receive()
1203 if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX) in mctp_pkttype_receive()
1209 if (!(mctp_address_unicast(mh->src) || mctp_address_null(mh->src))) in mctp_pkttype_receive()
1213 if (!(mctp_address_unicast(mh->dest) || mctp_address_null(mh->dest) || in mctp_pkttype_receive()
1214 mctp_address_broadcast(mh->dest))) in mctp_pkttype_receive()
1217 /* MCTP drivers must populate halen/haddr */ in mctp_pkttype_receive()
1218 if (dev->type == ARPHRD_MCTP) { in mctp_pkttype_receive()
1222 cb->halen = 0; in mctp_pkttype_receive()
1224 cb->net = READ_ONCE(mdev->net); in mctp_pkttype_receive()
1225 cb->ifindex = dev->ifindex; in mctp_pkttype_receive()
1227 rt = mctp_route_lookup(net, cb->net, mh->dest); in mctp_pkttype_receive()
1230 if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST) in mctp_pkttype_receive()
1236 rt->output(rt, skb); in mctp_pkttype_receive()
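
mctp_pkttype_receive() above only accepts source EIDs that are unicast or null, and destination EIDs that are unicast, null or broadcast. The helpers below sketch those address classes, assuming the DSP0236 EID layout (0 null, 1-7 reserved, 8-254 unicast, 255 broadcast):

#include <stdbool.h>
#include <stdint.h>

typedef uint8_t mctp_eid_t;

bool eid_is_null(mctp_eid_t eid)      { return eid == 0; }
bool eid_is_broadcast(mctp_eid_t eid) { return eid == 0xff; }
bool eid_is_unicast(mctp_eid_t eid)   { return eid >= 8 && eid < 0xff; }

/* mirrors the sanity checks above: a source may never be broadcast */
bool src_eid_ok(mctp_eid_t src)
{
	return eid_is_unicast(src) || eid_is_null(src);
}

bool dest_eid_ok(mctp_eid_t dest)
{
	return eid_is_unicast(dest) || eid_is_null(dest) ||
	       eid_is_broadcast(dest);
}
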
1269 struct net *net = sock_net(skb->sk); in mctp_route_nlparse()
1283 return -EINVAL; in mctp_route_nlparse()
1289 return -EINVAL; in mctp_route_nlparse()
1294 if ((*rtm)->rtm_family != AF_MCTP) { in mctp_route_nlparse()
1296 return -EINVAL; in mctp_route_nlparse()
1302 return -ENODEV; in mctp_route_nlparse()
1306 return -ENODEV; in mctp_route_nlparse()
1308 if (dev->flags & IFF_LOOPBACK) { in mctp_route_nlparse()
1310 return -EINVAL; in mctp_route_nlparse()
1336 if (rtm->rtm_type != RTN_UNICAST) { in mctp_newroute()
1338 return -EINVAL; in mctp_newroute()
1351 rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu, in mctp_newroute()
1352 rtm->rtm_type); in mctp_newroute()
1371 if (rtm->rtm_type != RTN_UNICAST) in mctp_delroute()
1372 return -EINVAL; in mctp_delroute()
1374 rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST); in mctp_delroute()
1387 return -EMSGSIZE; in mctp_fill_rtinfo()
1390 hdr->rtm_family = AF_MCTP; in mctp_fill_rtinfo()
1395 hdr->rtm_dst_len = rt->max - rt->min; in mctp_fill_rtinfo()
1396 hdr->rtm_src_len = 0; in mctp_fill_rtinfo()
1397 hdr->rtm_tos = 0; in mctp_fill_rtinfo()
1398 hdr->rtm_table = RT_TABLE_DEFAULT; in mctp_fill_rtinfo()
1399 hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */ in mctp_fill_rtinfo()
1400 hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */ in mctp_fill_rtinfo()
1401 hdr->rtm_type = rt->type; in mctp_fill_rtinfo()
1403 if (nla_put_u8(skb, RTA_DST, rt->min)) in mctp_fill_rtinfo()
1410 if (rt->mtu) { in mctp_fill_rtinfo()
1411 if (nla_put_u32(skb, RTAX_MTU, rt->mtu)) in mctp_fill_rtinfo()
1417 if (rt->dev) { in mctp_fill_rtinfo()
1418 if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex)) in mctp_fill_rtinfo()
1430 return -EMSGSIZE; in mctp_fill_rtinfo()
1435 struct net *net = sock_net(skb->sk); in mctp_dump_rtinfo()
1440 * cb->strict_check in mctp_dump_rtinfo()
1444 s_idx = cb->args[0]; in mctp_dump_rtinfo()
1448 list_for_each_entry_rcu(rt, &net->mctp.routes, list) { in mctp_dump_rtinfo()
1452 NETLINK_CB(cb->skb).portid, in mctp_dump_rtinfo()
1453 cb->nlh->nlmsg_seq, in mctp_dump_rtinfo()
1459 cb->args[0] = idx; in mctp_dump_rtinfo()
1461 return skb->len; in mctp_dump_rtinfo()
1467 struct netns_mctp *ns = &net->mctp; in mctp_routes_net_init()
1469 INIT_LIST_HEAD(&ns->routes); in mctp_routes_net_init()
1470 INIT_HLIST_HEAD(&ns->binds); in mctp_routes_net_init()
1471 mutex_init(&ns->bind_lock); in mctp_routes_net_init()
1472 INIT_HLIST_HEAD(&ns->keys); in mctp_routes_net_init()
1473 spin_lock_init(&ns->keys_lock); in mctp_routes_net_init()
1483 list_for_each_entry_rcu(rt, &net->mctp.routes, list) in mctp_routes_net_exit()
1530 #include "test/route-test.c"