xref: /linux/net/mptcp/diag.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
// SPDX-License-Identifier: GPL-2.0
/* MPTCP socket monitoring support
 *
 * Copyright (c) 2019 Red Hat
 *
 * Author: Davide Caratti <dcaratti@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
#include "protocol.h"

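/* Dump the diag information for one MPTCP subflow into an
 * INET_ULP_INFO_MPTCP nest: local/remote tokens, address IDs, the
 * negotiated/state flags and, for CAP_NET_ADMIN callers only, the sequence
 * and mapping counters of the subflow context attached to @sk. Listener
 * sockets and sockets without a subflow context are skipped.
 */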
static int subflow_get_info(struct sock *sk, struct sk_buff *skb, bool net_admin)
{
	struct mptcp_subflow_context *sf;
	struct nlattr *start;
	u32 flags = 0;
	bool slow;
	int err;

	if (inet_sk_state_load(sk) == TCP_LISTEN)
		return 0;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
	if (!start)
		return -EMSGSIZE;

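	/* The subflow context hangs off icsk_ulp_data and is RCU protected:
	 * hold the RCU read lock while dereferencing it, and the (fast)
	 * socket lock so the dumped subflow state stays consistent.
	 */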
	slow = lock_sock_fast(sk);
	rcu_read_lock();
	sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!sf) {
		err = 0;
		goto nla_failure;
	}

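	/* Translate the subflow state bits into the MPTCP_SUBFLOW_FLAG_*
	 * values exported to userspace.
	 */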
	if (sf->mp_capable)
		flags |= MPTCP_SUBFLOW_FLAG_MCAP_REM;
	if (sf->request_mptcp)
		flags |= MPTCP_SUBFLOW_FLAG_MCAP_LOC;
	if (sf->mp_join)
		flags |= MPTCP_SUBFLOW_FLAG_JOIN_REM;
	if (sf->request_join)
		flags |= MPTCP_SUBFLOW_FLAG_JOIN_LOC;
	if (sf->backup)
		flags |= MPTCP_SUBFLOW_FLAG_BKUP_REM;
	if (sf->request_bkup)
		flags |= MPTCP_SUBFLOW_FLAG_BKUP_LOC;
	if (READ_ONCE(sf->fully_established))
		flags |= MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED;
	if (sf->conn_finished)
		flags |= MPTCP_SUBFLOW_FLAG_CONNECTED;
	if (sf->map_valid)
		flags |= MPTCP_SUBFLOW_FLAG_MAPVALID;

	if (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_REM, sf->remote_token) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_LOC, sf->token) ||
	    nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
	    nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) {
		err = -EMSGSIZE;
		goto nla_failure;
	}

	/* Only export seq related counters to user with CAP_NET_ADMIN */
	if (net_admin &&
	    (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
			 sf->rel_write_seq) ||
	     nla_put_u64_64bit(skb, MPTCP_SUBFLOW_ATTR_MAP_SEQ, sf->map_seq,
			       MPTCP_SUBFLOW_ATTR_PAD) ||
	     nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
			 sf->map_subflow_seq) ||
	     nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_SSN_OFFSET, sf->ssn_offset) ||
	     nla_put_u16(skb, MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
			 sf->map_data_len))) {
		err = -EMSGSIZE;
		goto nla_failure;
	}

	rcu_read_unlock();
	unlock_sock_fast(sk, slow);
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	unlock_sock_fast(sk, slow);
	nla_nest_cancel(skb, start);
	return err;
}

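/* Return the worst-case amount of attribute space subflow_get_info() may
 * need, so the caller can reserve enough room in the diag reply. Keep in
 * sync with the attributes dumped above.
 */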
static size_t subflow_get_info_size(const struct sock *sk, bool net_admin)
{
	size_t size = 0;

	size += nla_total_size(0) +	/* INET_ULP_INFO_MPTCP */
		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_TOKEN_REM */
		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_TOKEN_LOC */
		nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_FLAGS */
		nla_total_size(1) +	/* MPTCP_SUBFLOW_ATTR_ID_REM */
		nla_total_size(1) +	/* MPTCP_SUBFLOW_ATTR_ID_LOC */
		0;

	if (net_admin)
		size += nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
			nla_total_size_64bit(8) +	/* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
			nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
			nla_total_size(4) +	/* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
			nla_total_size(2) +	/* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
			0;

	return size;
}

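/* Plug the two callbacks above into the MPTCP subflow ULP ops, so that
 * socket diag dumps can carry the INET_ULP_INFO_MPTCP nest for each
 * subflow.
 */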
void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops)
{
	ops->get_info = subflow_get_info;
	ops->get_info_size = subflow_get_info_size;
}
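
/* Illustrative userspace sketch (not part of this kernel file): one possible
 * way to decode the INET_ULP_INFO_MPTCP nest emitted above, assuming the
 * caller has already walked an inet_diag (sock_diag) reply down to that
 * nest (typically found under INET_DIAG_ULP_INFO) and that libmnl is
 * available. The attribute names come from <linux/mptcp.h>; the helper name
 * and everything else below are assumptions for illustration only.
 */
#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/mptcp.h>

/* Hypothetical helper: print the per-subflow attributes found in one
 * INET_ULP_INFO_MPTCP nest.
 */
void dump_mptcp_subflow_info(const struct nlattr *nest)
{
	const struct nlattr *attr;

	mnl_attr_for_each_nested(attr, nest) {
		switch (mnl_attr_get_type(attr)) {
		case MPTCP_SUBFLOW_ATTR_TOKEN_REM:
			printf("remote token: %u\n", mnl_attr_get_u32(attr));
			break;
		case MPTCP_SUBFLOW_ATTR_TOKEN_LOC:
			printf("local token:  %u\n", mnl_attr_get_u32(attr));
			break;
		case MPTCP_SUBFLOW_ATTR_FLAGS:
			printf("flags:        0x%x\n", mnl_attr_get_u32(attr));
			break;
		case MPTCP_SUBFLOW_ATTR_ID_REM:
			printf("remote id:    %u\n", mnl_attr_get_u8(attr));
			break;
		case MPTCP_SUBFLOW_ATTR_ID_LOC:
			printf("local id:     %u\n", mnl_attr_get_u8(attr));
			break;
		default:
			/* seq/mapping counters (CAP_NET_ADMIN only) and
			 * attributes added by newer kernels
			 */
			break;
		}
	}
}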