xref: /linux/drivers/infiniband/core/nldev.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
1 /*
2  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Neither the names of the copyright holders nor the names of its
13  *    contributors may be used to endorse or promote products derived from
14  *    this software without specific prior written permission.
15  *
16  * Alternatively, this software may be distributed under the terms of the
17  * GNU General Public License ("GPL") version 2 as published by the Free
18  * Software Foundation.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <linux/module.h>
34 #include <linux/pid.h>
35 #include <linux/pid_namespace.h>
36 #include <linux/mutex.h>
37 #include <net/netlink.h>
38 #include <rdma/rdma_cm.h>
39 #include <rdma/rdma_netlink.h>
40 
41 #include "core_priv.h"
42 #include "cma_priv.h"
43 #include "restrack.h"
44 #include "uverbs.h"
45 
46 /*
47  * Controls whether a non-privileged user is allowed to specify a controlled
48  * QKEY: when true, non-privileged users may specify controlled QKEYs;
49  * otherwise only privileged users may.
50  */
51 static bool privileged_qkey;
52 
53 typedef int (*res_fill_func_t)(struct sk_buff*, bool,
54 			       struct rdma_restrack_entry*, uint32_t);
55 
56 /*
57  * Keep the array elements sorted by netlink attribute name
58  */
59 static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
60 	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
61 	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
62 	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
63 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
64 	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
65 					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
66 	[RDMA_NLDEV_ATTR_DEV_DIM]               = { .type = NLA_U8 },
67 	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
68 	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
69 					.len = IB_DEVICE_NAME_MAX },
70 	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
71 	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
72 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
73 	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
74 	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
75 	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
76 	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
77 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
78 	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
79 	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
80 	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
81 	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
82 	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
83 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
84 	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
85 	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
86 					.len = IFNAMSIZ },
87 	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
88 	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
89 	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
90 					.len = IFNAMSIZ },
91 	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
92 	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
93 	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
94 	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
95 	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
96 	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
97 	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
98 	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
99 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
100 	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
101 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
102 	[RDMA_NLDEV_ATTR_RES_CTX]		= { .type = NLA_NESTED },
103 	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
104 	[RDMA_NLDEV_ATTR_RES_CTX_ENTRY]		= { .type = NLA_NESTED },
105 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
106 			.len = sizeof(struct __kernel_sockaddr_storage) },
107 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
108 	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
109 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
110 	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
111 	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
112 	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
113 	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
114 	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
115 	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
116 	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
117 	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
118 	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
119 	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
120 	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
121 	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
122 	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
123 	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
124 	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
125 	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
126 	[RDMA_NLDEV_ATTR_RES_RAW]		= { .type = NLA_BINARY },
127 	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
128 	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
129 	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
130 	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
131 	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
132 			.len = sizeof(struct __kernel_sockaddr_storage) },
133 	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
134 	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
135 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
136 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
137 	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
138 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
139 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
140 	[RDMA_NLDEV_ATTR_RES_SUBTYPE]		= { .type = NLA_NUL_STRING,
141 					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
142 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
143 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
144 	[RDMA_NLDEV_ATTR_RES_SRQ]		= { .type = NLA_NESTED },
145 	[RDMA_NLDEV_ATTR_RES_SRQN]		= { .type = NLA_U32 },
146 	[RDMA_NLDEV_ATTR_RES_SRQ_ENTRY]		= { .type = NLA_NESTED },
147 	[RDMA_NLDEV_ATTR_MIN_RANGE]		= { .type = NLA_U32 },
148 	[RDMA_NLDEV_ATTR_MAX_RANGE]		= { .type = NLA_U32 },
149 	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
150 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
151 	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
152 	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
153 	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
154 	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
155 	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
156 	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]       = { .type = NLA_U32 },
157 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]       = { .type = NLA_NESTED },
158 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]  = { .type = NLA_NESTED },
159 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
160 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
161 	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
162 	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
163 	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
164 	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
165 	[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK]	= { .type = NLA_U8 },
166 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX]	= { .type = NLA_U32 },
167 	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
168 	[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
169 	[RDMA_NLDEV_ATTR_DRIVER_DETAILS]	= { .type = NLA_U8 },
170 };
171 
172 static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
173 				      enum rdma_nldev_print_type print_type)
174 {
175 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
176 		return -EMSGSIZE;
177 	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
178 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
179 		return -EMSGSIZE;
180 
181 	return 0;
182 }
183 
184 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
185 				   enum rdma_nldev_print_type print_type,
186 				   u32 value)
187 {
188 	if (put_driver_name_print_type(msg, name, print_type))
189 		return -EMSGSIZE;
190 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
191 		return -EMSGSIZE;
192 
193 	return 0;
194 }
195 
196 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
197 				   enum rdma_nldev_print_type print_type,
198 				   u64 value)
199 {
200 	if (put_driver_name_print_type(msg, name, print_type))
201 		return -EMSGSIZE;
202 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
203 			      RDMA_NLDEV_ATTR_PAD))
204 		return -EMSGSIZE;
205 
206 	return 0;
207 }
208 
209 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
210 			      const char *str)
211 {
212 	if (put_driver_name_print_type(msg, name,
213 				       RDMA_NLDEV_PRINT_TYPE_UNSPEC))
214 		return -EMSGSIZE;
215 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
216 		return -EMSGSIZE;
217 
218 	return 0;
219 }
220 EXPORT_SYMBOL(rdma_nl_put_driver_string);
221 
222 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
223 {
224 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
225 				       value);
226 }
227 EXPORT_SYMBOL(rdma_nl_put_driver_u32);
228 
229 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
230 			       u32 value)
231 {
232 	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
233 				       value);
234 }
235 EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
236 
237 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
238 {
239 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
240 				       value);
241 }
242 EXPORT_SYMBOL(rdma_nl_put_driver_u64);
243 
244 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
245 {
246 	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
247 				       value);
248 }
249 EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
250 
251 bool rdma_nl_get_privileged_qkey(void)
252 {
253 	return privileged_qkey || capable(CAP_NET_RAW);
254 }
255 EXPORT_SYMBOL(rdma_nl_get_privileged_qkey);
256 
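/*
 * Emit the attributes that identify an ib_device in a nldev reply:
 * the device index and the device name.
 */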
257 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
258 {
259 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
260 		return -EMSGSIZE;
261 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
262 			   dev_name(&device->dev)))
263 		return -EMSGSIZE;
264 
265 	return 0;
266 }
267 
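/*
 * Fill the device-wide attributes: device handle, last valid port number,
 * capability flags, firmware version, GUIDs, node type, DIM setting and
 * the protocol string derived from the first port.
 */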
268 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
269 {
270 	char fw[IB_FW_VERSION_NAME_MAX];
271 	int ret = 0;
272 	u32 port;
273 
274 	if (fill_nldev_handle(msg, device))
275 		return -EMSGSIZE;
276 
277 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
278 		return -EMSGSIZE;
279 
280 	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
281 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
282 			      device->attrs.device_cap_flags,
283 			      RDMA_NLDEV_ATTR_PAD))
284 		return -EMSGSIZE;
285 
286 	ib_get_device_fw_str(device, fw);
287 	/* Device without FW has strlen(fw) = 0 */
288 	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
289 		return -EMSGSIZE;
290 
291 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
292 			      be64_to_cpu(device->node_guid),
293 			      RDMA_NLDEV_ATTR_PAD))
294 		return -EMSGSIZE;
295 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
296 			      be64_to_cpu(device->attrs.sys_image_guid),
297 			      RDMA_NLDEV_ATTR_PAD))
298 		return -EMSGSIZE;
299 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
300 		return -EMSGSIZE;
301 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
302 		return -EMSGSIZE;
303 
304 	/*
305 	 * The link type is determined from the first port; an mlx4 device,
306 	 * which can potentially have two different link types on the same
307 	 * IB device, is considered a case better avoided in the future.
308 	 */
309 	port = rdma_start_port(device);
310 	if (rdma_cap_opa_mad(device, port))
311 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
312 	else if (rdma_protocol_ib(device, port))
313 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
314 	else if (rdma_protocol_iwarp(device, port))
315 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
316 	else if (rdma_protocol_roce(device, port))
317 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
318 	else if (rdma_protocol_usnic(device, port))
319 		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
320 				     "usnic");
321 	return ret;
322 }
323 
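/*
 * Fill the per-port attributes. IB-specific fields (capability flags,
 * subnet prefix, LID, SM LID, LMC) are reported only for IB ports, and
 * the associated netdev only when it belongs to the caller's network
 * namespace.
 */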
324 static int fill_port_info(struct sk_buff *msg,
325 			  struct ib_device *device, u32 port,
326 			  const struct net *net)
327 {
328 	struct net_device *netdev = NULL;
329 	struct ib_port_attr attr;
330 	int ret;
331 	u64 cap_flags = 0;
332 
333 	if (fill_nldev_handle(msg, device))
334 		return -EMSGSIZE;
335 
336 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
337 		return -EMSGSIZE;
338 
339 	ret = ib_query_port(device, port, &attr);
340 	if (ret)
341 		return ret;
342 
343 	if (rdma_protocol_ib(device, port)) {
344 		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
345 				sizeof(attr.port_cap_flags2)) > sizeof(u64));
346 		cap_flags = attr.port_cap_flags |
347 			((u64)attr.port_cap_flags2 << 32);
348 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
349 				      cap_flags, RDMA_NLDEV_ATTR_PAD))
350 			return -EMSGSIZE;
351 		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
352 				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
353 			return -EMSGSIZE;
354 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
355 			return -EMSGSIZE;
356 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
357 			return -EMSGSIZE;
358 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
359 			return -EMSGSIZE;
360 	}
361 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
362 		return -EMSGSIZE;
363 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
364 		return -EMSGSIZE;
365 
366 	netdev = ib_device_get_netdev(device, port);
367 	if (netdev && net_eq(dev_net(netdev), net)) {
368 		ret = nla_put_u32(msg,
369 				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
370 		if (ret)
371 			goto out;
372 		ret = nla_put_string(msg,
373 				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
374 	}
375 
376 out:
377 	dev_put(netdev);
378 	return ret;
379 }
380 
381 static int fill_res_info_entry(struct sk_buff *msg,
382 			       const char *name, u64 curr)
383 {
384 	struct nlattr *entry_attr;
385 
386 	entry_attr = nla_nest_start_noflag(msg,
387 					   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
388 	if (!entry_attr)
389 		return -EMSGSIZE;
390 
391 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
392 		goto err;
393 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
394 			      RDMA_NLDEV_ATTR_PAD))
395 		goto err;
396 
397 	nla_nest_end(msg, entry_attr);
398 	return 0;
399 
400 err:
401 	nla_nest_cancel(msg, entry_attr);
402 	return -EMSGSIZE;
403 }
404 
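/*
 * Build the RDMA_NLDEV_ATTR_RES_SUMMARY table: one nested entry per
 * restrack resource type with its name and current object count.
 */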
405 static int fill_res_info(struct sk_buff *msg, struct ib_device *device,
406 			 bool show_details)
407 {
408 	static const char * const names[RDMA_RESTRACK_MAX] = {
409 		[RDMA_RESTRACK_PD] = "pd",
410 		[RDMA_RESTRACK_CQ] = "cq",
411 		[RDMA_RESTRACK_QP] = "qp",
412 		[RDMA_RESTRACK_CM_ID] = "cm_id",
413 		[RDMA_RESTRACK_MR] = "mr",
414 		[RDMA_RESTRACK_CTX] = "ctx",
415 		[RDMA_RESTRACK_SRQ] = "srq",
416 	};
417 
418 	struct nlattr *table_attr;
419 	int ret, i, curr;
420 
421 	if (fill_nldev_handle(msg, device))
422 		return -EMSGSIZE;
423 
424 	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
425 	if (!table_attr)
426 		return -EMSGSIZE;
427 
428 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
429 		if (!names[i])
430 			continue;
431 		curr = rdma_restrack_count(device, i, show_details);
432 		ret = fill_res_info_entry(msg, names[i], curr);
433 		if (ret)
434 			goto err;
435 	}
436 
437 	nla_nest_end(msg, table_attr);
438 	return 0;
439 
440 err:
441 	nla_nest_cancel(msg, table_attr);
442 	return ret;
443 }
444 
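/*
 * Report the resource owner: the kernel owner name (res->kern_name) for
 * kernel resources, or the owning task's PID for user resources.
 */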
445 static int fill_res_name_pid(struct sk_buff *msg,
446 			     struct rdma_restrack_entry *res)
447 {
448 	int err = 0;
449 
450 	/*
451 	 * For user resources, userspace should read /proc/PID/comm to get
452 	 * the name of the task.
453 	 */
454 	if (rdma_is_kernel_res(res)) {
455 		err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
456 				     res->kern_name);
457 	} else {
458 		pid_t pid;
459 
460 		pid = task_pid_vnr(res->task);
461 		/*
462 		 * If the task is dead and in zombie state, the PID is zero and
463 		 * there is no need to report it anymore.
464 		 */
465 		if (pid)
466 			/*
467 			 * This part is racy: the task can be killed and the PID
468 			 * become zero right here, but that is OK; the next query
469 			 * won't return a PID. We don't promise real-time
470 			 * reflection of SW objects.
471 			 */
472 			err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
473 	}
474 
475 	return err ? -EMSGSIZE : 0;
476 }
477 
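/*
 * Query the QP and emit state only available via ib_query_qp(): remote
 * QPN and RQ PSN for connected QPs, SQ PSN, path migration state, QP
 * type and state, plus any driver-specific attributes.
 */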
478 static int fill_res_qp_entry_query(struct sk_buff *msg,
479 				   struct rdma_restrack_entry *res,
480 				   struct ib_device *dev,
481 				   struct ib_qp *qp)
482 {
483 	struct ib_qp_init_attr qp_init_attr;
484 	struct ib_qp_attr qp_attr;
485 	int ret;
486 
487 	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
488 	if (ret)
489 		return ret;
490 
491 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
492 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
493 				qp_attr.dest_qp_num))
494 			goto err;
495 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
496 				qp_attr.rq_psn))
497 			goto err;
498 	}
499 
500 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
501 		goto err;
502 
503 	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
504 	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
505 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
506 			       qp_attr.path_mig_state))
507 			goto err;
508 	}
509 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
510 		goto err;
511 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
512 		goto err;
513 
514 	if (dev->ops.fill_res_qp_entry)
515 		return dev->ops.fill_res_qp_entry(msg, qp);
516 	return 0;
517 
518 err:	return -EMSGSIZE;
519 }
520 
521 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
522 			     struct rdma_restrack_entry *res, uint32_t port)
523 {
524 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
525 	struct ib_device *dev = qp->device;
526 	int ret;
527 
528 	if (port && port != qp->port)
529 		return -EAGAIN;
530 
531 	/* In create_qp() port is not set yet */
532 	if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port))
533 		return -EMSGSIZE;
534 
535 	ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num);
536 	if (ret)
537 		return -EMSGSIZE;
538 
539 	if (!rdma_is_kernel_res(res) &&
540 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
541 		return -EMSGSIZE;
542 
543 	ret = fill_res_name_pid(msg, res);
544 	if (ret)
545 		return -EMSGSIZE;
546 
547 	return fill_res_qp_entry_query(msg, res, dev, qp);
548 }
549 
550 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
551 				 struct rdma_restrack_entry *res, uint32_t port)
552 {
553 	struct ib_qp *qp = container_of(res, struct ib_qp, res);
554 	struct ib_device *dev = qp->device;
555 
556 	if (port && port != qp->port)
557 		return -EAGAIN;
558 	if (!dev->ops.fill_res_qp_entry_raw)
559 		return -EINVAL;
560 	return dev->ops.fill_res_qp_entry_raw(msg, qp);
561 }
562 
563 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
564 				struct rdma_restrack_entry *res, uint32_t port)
565 {
566 	struct rdma_id_private *id_priv =
567 				container_of(res, struct rdma_id_private, res);
568 	struct ib_device *dev = id_priv->id.device;
569 	struct rdma_cm_id *cm_id = &id_priv->id;
570 
571 	if (port && port != cm_id->port_num)
572 		return -EAGAIN;
573 
574 	if (cm_id->port_num &&
575 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
576 		goto err;
577 
578 	if (id_priv->qp_num) {
579 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
580 			goto err;
581 		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
582 			goto err;
583 	}
584 
585 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
586 		goto err;
587 
588 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
589 		goto err;
590 
591 	if (cm_id->route.addr.src_addr.ss_family &&
592 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
593 		    sizeof(cm_id->route.addr.src_addr),
594 		    &cm_id->route.addr.src_addr))
595 		goto err;
596 	if (cm_id->route.addr.dst_addr.ss_family &&
597 	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
598 		    sizeof(cm_id->route.addr.dst_addr),
599 		    &cm_id->route.addr.dst_addr))
600 		goto err;
601 
602 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
603 		goto err;
604 
605 	if (fill_res_name_pid(msg, res))
606 		goto err;
607 
608 	if (dev->ops.fill_res_cm_id_entry)
609 		return dev->ops.fill_res_cm_id_entry(msg, cm_id);
610 	return 0;
611 
612 err: return -EMSGSIZE;
613 }
614 
615 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
616 			     struct rdma_restrack_entry *res, uint32_t port)
617 {
618 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
619 	struct ib_device *dev = cq->device;
620 
621 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
622 		return -EMSGSIZE;
623 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
624 			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
625 		return -EMSGSIZE;
626 
627 	/* Poll context is only valid for kernel CQs */
628 	if (rdma_is_kernel_res(res) &&
629 	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
630 		return -EMSGSIZE;
631 
632 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
633 		return -EMSGSIZE;
634 
635 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
636 		return -EMSGSIZE;
637 	if (!rdma_is_kernel_res(res) &&
638 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
639 			cq->uobject->uevent.uobject.context->res.id))
640 		return -EMSGSIZE;
641 
642 	if (fill_res_name_pid(msg, res))
643 		return -EMSGSIZE;
644 
645 	return (dev->ops.fill_res_cq_entry) ?
646 		dev->ops.fill_res_cq_entry(msg, cq) : 0;
647 }
648 
649 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
650 				 struct rdma_restrack_entry *res, uint32_t port)
651 {
652 	struct ib_cq *cq = container_of(res, struct ib_cq, res);
653 	struct ib_device *dev = cq->device;
654 
655 	if (!dev->ops.fill_res_cq_entry_raw)
656 		return -EINVAL;
657 	return dev->ops.fill_res_cq_entry_raw(msg, cq);
658 }
659 
660 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
661 			     struct rdma_restrack_entry *res, uint32_t port)
662 {
663 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
664 	struct ib_device *dev = mr->pd->device;
665 
666 	if (has_cap_net_admin) {
667 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
668 			return -EMSGSIZE;
669 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
670 			return -EMSGSIZE;
671 	}
672 
673 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
674 			      RDMA_NLDEV_ATTR_PAD))
675 		return -EMSGSIZE;
676 
677 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
678 		return -EMSGSIZE;
679 
680 	if (!rdma_is_kernel_res(res) &&
681 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
682 		return -EMSGSIZE;
683 
684 	if (fill_res_name_pid(msg, res))
685 		return -EMSGSIZE;
686 
687 	return (dev->ops.fill_res_mr_entry) ?
688 		       dev->ops.fill_res_mr_entry(msg, mr) :
689 		       0;
690 }
691 
692 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
693 				 struct rdma_restrack_entry *res, uint32_t port)
694 {
695 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
696 	struct ib_device *dev = mr->pd->device;
697 
698 	if (!dev->ops.fill_res_mr_entry_raw)
699 		return -EINVAL;
700 	return dev->ops.fill_res_mr_entry_raw(msg, mr);
701 }
702 
703 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
704 			     struct rdma_restrack_entry *res, uint32_t port)
705 {
706 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
707 
708 	if (has_cap_net_admin) {
709 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
710 				pd->local_dma_lkey))
711 			goto err;
712 		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
713 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
714 				pd->unsafe_global_rkey))
715 			goto err;
716 	}
717 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
718 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
719 		goto err;
720 
721 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
722 		goto err;
723 
724 	if (!rdma_is_kernel_res(res) &&
725 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
726 			pd->uobject->context->res.id))
727 		goto err;
728 
729 	return fill_res_name_pid(msg, res);
730 
731 err:	return -EMSGSIZE;
732 }
733 
734 static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
735 			      struct rdma_restrack_entry *res, uint32_t port)
736 {
737 	struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);
738 
739 	if (rdma_is_kernel_res(res))
740 		return 0;
741 
742 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
743 		return -EMSGSIZE;
744 
745 	return fill_res_name_pid(msg, res);
746 }
747 
748 static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
749 				   uint32_t max_range)
750 {
751 	struct nlattr *entry_attr;
752 
753 	if (!min_range)
754 		return 0;
755 
756 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
757 	if (!entry_attr)
758 		return -EMSGSIZE;
759 
760 	if (min_range == max_range) {
761 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
762 			goto err;
763 	} else {
764 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
765 			goto err;
766 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
767 			goto err;
768 	}
769 	nla_nest_end(msg, entry_attr);
770 	return 0;
771 
772 err:
773 	nla_nest_cancel(msg, entry_attr);
774 	return -EMSGSIZE;
775 }
776 
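/*
 * Walk the device's QP restrack table and report the QP numbers of all
 * QPs attached to this SRQ as compressed [min, max] ranges; bail out if
 * the qp_num values are not ascending.
 */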
777 static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
778 {
779 	uint32_t min_range = 0, prev = 0;
780 	struct rdma_restrack_entry *res;
781 	struct rdma_restrack_root *rt;
782 	struct nlattr *table_attr;
783 	struct ib_qp *qp = NULL;
784 	unsigned long id = 0;
785 
786 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
787 	if (!table_attr)
788 		return -EMSGSIZE;
789 
790 	rt = &srq->device->res[RDMA_RESTRACK_QP];
791 	xa_lock(&rt->xa);
792 	xa_for_each(&rt->xa, id, res) {
793 		if (!rdma_restrack_get(res))
794 			continue;
795 
796 		qp = container_of(res, struct ib_qp, res);
797 		if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
798 			rdma_restrack_put(res);
799 			continue;
800 		}
801 
802 		if (qp->qp_num < prev)
803 			/* qp_num should be ascending */
804 			goto err_loop;
805 
806 		if (min_range == 0) {
807 			min_range = qp->qp_num;
808 		} else if (qp->qp_num > (prev + 1)) {
809 			if (fill_res_range_qp_entry(msg, min_range, prev))
810 				goto err_loop;
811 
812 			min_range = qp->qp_num;
813 		}
814 		prev = qp->qp_num;
815 		rdma_restrack_put(res);
816 	}
817 
818 	xa_unlock(&rt->xa);
819 
820 	if (fill_res_range_qp_entry(msg, min_range, prev))
821 		goto err;
822 
823 	nla_nest_end(msg, table_attr);
824 	return 0;
825 
826 err_loop:
827 	rdma_restrack_put(res);
828 	xa_unlock(&rt->xa);
829 err:
830 	nla_nest_cancel(msg, table_attr);
831 	return -EMSGSIZE;
832 }
833 
834 static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
835 			      struct rdma_restrack_entry *res, uint32_t port)
836 {
837 	struct ib_srq *srq = container_of(res, struct ib_srq, res);
838 	struct ib_device *dev = srq->device;
839 
840 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
841 		goto err;
842 
843 	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
844 		goto err;
845 
846 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
847 		goto err;
848 
849 	if (ib_srq_has_cq(srq->srq_type)) {
850 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
851 				srq->ext.cq->res.id))
852 			goto err;
853 	}
854 
855 	if (fill_res_srq_qps(msg, srq))
856 		goto err;
857 
858 	if (fill_res_name_pid(msg, res))
859 		goto err;
860 
861 	if (dev->ops.fill_res_srq_entry)
862 		return dev->ops.fill_res_srq_entry(msg, srq);
863 
864 	return 0;
865 
866 err:
867 	return -EMSGSIZE;
868 }
869 
870 static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
871 				 struct rdma_restrack_entry *res, uint32_t port)
872 {
873 	struct ib_srq *srq = container_of(res, struct ib_srq, res);
874 	struct ib_device *dev = srq->device;
875 
876 	if (!dev->ops.fill_res_srq_entry_raw)
877 		return -EINVAL;
878 	return dev->ops.fill_res_srq_entry_raw(msg, srq);
879 }
880 
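/*
 * Report the counter's bind mode; in auto mode also report the QP type
 * and/or owning PID on which the automatic binding is keyed.
 */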
881 static int fill_stat_counter_mode(struct sk_buff *msg,
882 				  struct rdma_counter *counter)
883 {
884 	struct rdma_counter_mode *m = &counter->mode;
885 
886 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
887 		return -EMSGSIZE;
888 
889 	if (m->mode == RDMA_COUNTER_MODE_AUTO) {
890 		if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
891 		    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
892 			return -EMSGSIZE;
893 
894 		if ((m->mask & RDMA_COUNTER_MASK_PID) &&
895 		    fill_res_name_pid(msg, &counter->res))
896 			return -EMSGSIZE;
897 	}
898 
899 	return 0;
900 }
901 
902 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
903 {
904 	struct nlattr *entry_attr;
905 
906 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
907 	if (!entry_attr)
908 		return -EMSGSIZE;
909 
910 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
911 		goto err;
912 
913 	nla_nest_end(msg, entry_attr);
914 	return 0;
915 
916 err:
917 	nla_nest_cancel(msg, entry_attr);
918 	return -EMSGSIZE;
919 }
920 
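/* List the QPs currently bound to this counter as a nested QP table. */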
921 static int fill_stat_counter_qps(struct sk_buff *msg,
922 				 struct rdma_counter *counter)
923 {
924 	struct rdma_restrack_entry *res;
925 	struct rdma_restrack_root *rt;
926 	struct nlattr *table_attr;
927 	struct ib_qp *qp = NULL;
928 	unsigned long id = 0;
929 	int ret = 0;
930 
931 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
932 	if (!table_attr)
933 		return -EMSGSIZE;
934 
935 	rt = &counter->device->res[RDMA_RESTRACK_QP];
936 	xa_lock(&rt->xa);
937 	xa_for_each(&rt->xa, id, res) {
938 		qp = container_of(res, struct ib_qp, res);
939 		if (!qp->counter || (qp->counter->id != counter->id))
940 			continue;
941 
942 		ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
943 		if (ret)
944 			goto err;
945 	}
946 
947 	xa_unlock(&rt->xa);
948 	nla_nest_end(msg, table_attr);
949 	return 0;
950 
951 err:
952 	xa_unlock(&rt->xa);
953 	nla_nest_cancel(msg, table_attr);
954 	return ret;
955 }
956 
957 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
958 				 u64 value)
959 {
960 	struct nlattr *entry_attr;
961 
962 	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
963 	if (!entry_attr)
964 		return -EMSGSIZE;
965 
966 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
967 			   name))
968 		goto err;
969 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
970 			      value, RDMA_NLDEV_ATTR_PAD))
971 		goto err;
972 
973 	nla_nest_end(msg, entry_attr);
974 	return 0;
975 
976 err:
977 	nla_nest_cancel(msg, entry_attr);
978 	return -EMSGSIZE;
979 }
980 EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
981 
982 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
983 			      struct rdma_restrack_entry *res, uint32_t port)
984 {
985 	struct ib_mr *mr = container_of(res, struct ib_mr, res);
986 	struct ib_device *dev = mr->pd->device;
987 
988 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
989 		goto err;
990 
991 	if (dev->ops.fill_stat_mr_entry)
992 		return dev->ops.fill_stat_mr_entry(msg, mr);
993 	return 0;
994 
995 err:
996 	return -EMSGSIZE;
997 }
998 
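/*
 * Dump the counter's hardware statistics as name/value pairs, skipping
 * entries that are disabled.
 */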
999 static int fill_stat_counter_hwcounters(struct sk_buff *msg,
1000 					struct rdma_counter *counter)
1001 {
1002 	struct rdma_hw_stats *st = counter->stats;
1003 	struct nlattr *table_attr;
1004 	int i;
1005 
1006 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
1007 	if (!table_attr)
1008 		return -EMSGSIZE;
1009 
1010 	mutex_lock(&st->lock);
1011 	for (i = 0; i < st->num_counters; i++) {
1012 		if (test_bit(i, st->is_disabled))
1013 			continue;
1014 		if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name,
1015 						 st->value[i]))
1016 			goto err;
1017 	}
1018 	mutex_unlock(&st->lock);
1019 
1020 	nla_nest_end(msg, table_attr);
1021 	return 0;
1022 
1023 err:
1024 	mutex_unlock(&st->lock);
1025 	nla_nest_cancel(msg, table_attr);
1026 	return -EMSGSIZE;
1027 }
1028 
1029 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
1030 				  struct rdma_restrack_entry *res,
1031 				  uint32_t port)
1032 {
1033 	struct rdma_counter *counter =
1034 		container_of(res, struct rdma_counter, res);
1035 
1036 	if (port && port != counter->port)
1037 		return -EAGAIN;
1038 
1039 	/* Dump it even if the query failed */
1040 	rdma_counter_query_stats(counter);
1041 
1042 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
1043 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
1044 	    fill_stat_counter_mode(msg, counter) ||
1045 	    fill_stat_counter_qps(msg, counter) ||
1046 	    fill_stat_counter_hwcounters(msg, counter))
1047 		return -EMSGSIZE;
1048 
1049 	return 0;
1050 }
1051 
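/* Return device information for a single device selected by index. */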
1052 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1053 			  struct netlink_ext_ack *extack)
1054 {
1055 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1056 	struct ib_device *device;
1057 	struct sk_buff *msg;
1058 	u32 index;
1059 	int err;
1060 
1061 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1062 				     nldev_policy, extack);
1063 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1064 		return -EINVAL;
1065 
1066 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1067 
1068 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1069 	if (!device)
1070 		return -EINVAL;
1071 
1072 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1073 	if (!msg) {
1074 		err = -ENOMEM;
1075 		goto err;
1076 	}
1077 
1078 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1079 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1080 			0, 0);
1081 	if (!nlh) {
1082 		err = -EMSGSIZE;
1083 		goto err_free;
1084 	}
1085 
1086 	err = fill_dev_info(msg, device);
1087 	if (err)
1088 		goto err_free;
1089 
1090 	nlmsg_end(msg, nlh);
1091 
1092 	ib_device_put(device);
1093 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1094 
1095 err_free:
1096 	nlmsg_free(msg);
1097 err:
1098 	ib_device_put(device);
1099 	return err;
1100 }
1101 
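/*
 * Modify a device: rename it, move it to another network namespace, or
 * toggle its CQ adaptive moderation (DIM) setting.
 */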
1102 static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1103 			  struct netlink_ext_ack *extack)
1104 {
1105 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1106 	struct ib_device *device;
1107 	u32 index;
1108 	int err;
1109 
1110 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1111 				     nldev_policy, extack);
1112 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1113 		return -EINVAL;
1114 
1115 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1116 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1117 	if (!device)
1118 		return -EINVAL;
1119 
1120 	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
1121 		char name[IB_DEVICE_NAME_MAX] = {};
1122 
1123 		nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
1124 			    IB_DEVICE_NAME_MAX);
1125 		if (strlen(name) == 0) {
1126 			err = -EINVAL;
1127 			goto done;
1128 		}
1129 		err = ib_device_rename(device, name);
1130 		goto done;
1131 	}
1132 
1133 	if (tb[RDMA_NLDEV_NET_NS_FD]) {
1134 		u32 ns_fd;
1135 
1136 		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
1137 		err = ib_device_set_netns_put(skb, device, ns_fd);
1138 		goto put_done;
1139 	}
1140 
1141 	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
1142 		u8 use_dim;
1143 
1144 		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
1145 		err = ib_device_set_dim(device, use_dim);
1146 		goto done;
1147 	}
1148 
1149 done:
1150 	ib_device_put(device);
1151 put_done:
1152 	return err;
1153 }
1154 
1155 static int _nldev_get_dumpit(struct ib_device *device,
1156 			     struct sk_buff *skb,
1157 			     struct netlink_callback *cb,
1158 			     unsigned int idx)
1159 {
1160 	int start = cb->args[0];
1161 	struct nlmsghdr *nlh;
1162 
1163 	if (idx < start)
1164 		return 0;
1165 
1166 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1167 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1168 			0, NLM_F_MULTI);
1169 
1170 	if (!nlh || fill_dev_info(skb, device)) {
1171 		nlmsg_cancel(skb, nlh);
1172 		goto out;
1173 	}
1174 
1175 	nlmsg_end(skb, nlh);
1176 
1177 	idx++;
1178 
1179 out:	cb->args[0] = idx;
1180 	return skb->len;
1181 }
1182 
1183 static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1184 {
1185 	/*
1186 	 * There is no need to take a lock, because
1187 	 * we rely on ib_core's locking.
1188 	 */
1189 	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
1190 }
1191 
1192 static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1193 			       struct netlink_ext_ack *extack)
1194 {
1195 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1196 	struct ib_device *device;
1197 	struct sk_buff *msg;
1198 	u32 index;
1199 	u32 port;
1200 	int err;
1201 
1202 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1203 				     nldev_policy, extack);
1204 	if (err ||
1205 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1206 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
1207 		return -EINVAL;
1208 
1209 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1210 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1211 	if (!device)
1212 		return -EINVAL;
1213 
1214 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1215 	if (!rdma_is_port_valid(device, port)) {
1216 		err = -EINVAL;
1217 		goto err;
1218 	}
1219 
1220 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1221 	if (!msg) {
1222 		err = -ENOMEM;
1223 		goto err;
1224 	}
1225 
1226 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1227 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
1228 			0, 0);
1229 	if (!nlh) {
1230 		err = -EMSGSIZE;
1231 		goto err_free;
1232 	}
1233 
1234 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
1235 	if (err)
1236 		goto err_free;
1237 
1238 	nlmsg_end(msg, nlh);
1239 	ib_device_put(device);
1240 
1241 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1242 
1243 err_free:
1244 	nlmsg_free(msg);
1245 err:
1246 	ib_device_put(device);
1247 	return err;
1248 }
1249 
1250 static int nldev_port_get_dumpit(struct sk_buff *skb,
1251 				 struct netlink_callback *cb)
1252 {
1253 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1254 	struct ib_device *device;
1255 	int start = cb->args[0];
1256 	struct nlmsghdr *nlh;
1257 	u32 idx = 0;
1258 	u32 ifindex;
1259 	int err;
1260 	unsigned int p;
1261 
1262 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1263 				     nldev_policy, NULL);
1264 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1265 		return -EINVAL;
1266 
1267 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1268 	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
1269 	if (!device)
1270 		return -EINVAL;
1271 
1272 	rdma_for_each_port (device, p) {
1273 		/*
1274 		 * The dumpit function returns all information starting from a
1275 		 * specific index. The index is taken from the netlink request
1276 		 * sent by the user and is available in cb->args[0].
1277 		 *
1278 		 * Usually the user doesn't fill this field, which causes all
1279 		 * ports to be returned.
1280 		 *
1281 		 * Ports below the requested start index are skipped.
1282 		 */
1283 		if (idx < start) {
1284 			idx++;
1285 			continue;
1286 		}
1287 
1288 		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
1289 				cb->nlh->nlmsg_seq,
1290 				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1291 						 RDMA_NLDEV_CMD_PORT_GET),
1292 				0, NLM_F_MULTI);
1293 
1294 		if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) {
1295 			nlmsg_cancel(skb, nlh);
1296 			goto out;
1297 		}
1298 		idx++;
1299 		nlmsg_end(skb, nlh);
1300 	}
1301 
1302 out:
1303 	ib_device_put(device);
1304 	cb->args[0] = idx;
1305 	return skb->len;
1306 }
1307 
1308 static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1309 			      struct netlink_ext_ack *extack)
1310 {
1311 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1312 	bool show_details = false;
1313 	struct ib_device *device;
1314 	struct sk_buff *msg;
1315 	u32 index;
1316 	int ret;
1317 
1318 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1319 				     nldev_policy, extack);
1320 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1321 		return -EINVAL;
1322 
1323 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1324 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1325 	if (!device)
1326 		return -EINVAL;
1327 
1328 	if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
1329 		show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
1330 
1331 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1332 	if (!msg) {
1333 		ret = -ENOMEM;
1334 		goto err;
1335 	}
1336 
1337 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1338 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1339 			0, 0);
1340 	if (!nlh) {
1341 		ret = -EMSGSIZE;
1342 		goto err_free;
1343 	}
1344 
1345 	ret = fill_res_info(msg, device, show_details);
1346 	if (ret)
1347 		goto err_free;
1348 
1349 	nlmsg_end(msg, nlh);
1350 	ib_device_put(device);
1351 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1352 
1353 err_free:
1354 	nlmsg_free(msg);
1355 err:
1356 	ib_device_put(device);
1357 	return ret;
1358 }
1359 
1360 static int _nldev_res_get_dumpit(struct ib_device *device,
1361 				 struct sk_buff *skb,
1362 				 struct netlink_callback *cb,
1363 				 unsigned int idx)
1364 {
1365 	int start = cb->args[0];
1366 	struct nlmsghdr *nlh;
1367 
1368 	if (idx < start)
1369 		return 0;
1370 
1371 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1372 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
1373 			0, NLM_F_MULTI);
1374 
1375 	if (!nlh || fill_res_info(skb, device, false)) {
1376 		nlmsg_cancel(skb, nlh);
1377 		goto out;
1378 	}
1379 	nlmsg_end(skb, nlh);
1380 
1381 	idx++;
1382 
1383 out:
1384 	cb->args[0] = idx;
1385 	return skb->len;
1386 }
1387 
1388 static int nldev_res_get_dumpit(struct sk_buff *skb,
1389 				struct netlink_callback *cb)
1390 {
1391 	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
1392 }
1393 
1394 struct nldev_fill_res_entry {
1395 	enum rdma_nldev_attr nldev_attr;
1396 	u8 flags;
1397 	u32 entry;
1398 	u32 id;
1399 };
1400 
1401 enum nldev_res_flags {
1402 	NLDEV_PER_DEV = 1 << 0,
1403 };
1404 
1405 static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
1406 	[RDMA_RESTRACK_QP] = {
1407 		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
1408 		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
1409 		.id = RDMA_NLDEV_ATTR_RES_LQPN,
1410 	},
1411 	[RDMA_RESTRACK_CM_ID] = {
1412 		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
1413 		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
1414 		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
1415 	},
1416 	[RDMA_RESTRACK_CQ] = {
1417 		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
1418 		.flags = NLDEV_PER_DEV,
1419 		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
1420 		.id = RDMA_NLDEV_ATTR_RES_CQN,
1421 	},
1422 	[RDMA_RESTRACK_MR] = {
1423 		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
1424 		.flags = NLDEV_PER_DEV,
1425 		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
1426 		.id = RDMA_NLDEV_ATTR_RES_MRN,
1427 	},
1428 	[RDMA_RESTRACK_PD] = {
1429 		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
1430 		.flags = NLDEV_PER_DEV,
1431 		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
1432 		.id = RDMA_NLDEV_ATTR_RES_PDN,
1433 	},
1434 	[RDMA_RESTRACK_COUNTER] = {
1435 		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
1436 		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
1437 		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
1438 	},
1439 	[RDMA_RESTRACK_CTX] = {
1440 		.nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
1441 		.flags = NLDEV_PER_DEV,
1442 		.entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
1443 		.id = RDMA_NLDEV_ATTR_RES_CTXN,
1444 	},
1445 	[RDMA_RESTRACK_SRQ] = {
1446 		.nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
1447 		.flags = NLDEV_PER_DEV,
1448 		.entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
1449 		.id = RDMA_NLDEV_ATTR_RES_SRQN,
1450 	},
1451 
1452 };
1453 
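/*
 * Common handler for querying a single restrack object selected by its
 * per-type ID (e.g. LQPN, CQN, MRN); fill_func emits the type-specific
 * attributes.
 */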
1454 static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1455 			       struct netlink_ext_ack *extack,
1456 			       enum rdma_restrack_type res_type,
1457 			       res_fill_func_t fill_func)
1458 {
1459 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1460 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1461 	struct rdma_restrack_entry *res;
1462 	struct ib_device *device;
1463 	u32 index, id, port = 0;
1464 	bool has_cap_net_admin;
1465 	struct sk_buff *msg;
1466 	int ret;
1467 
1468 	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1469 				     nldev_policy, extack);
1470 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
1471 		return -EINVAL;
1472 
1473 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1474 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1475 	if (!device)
1476 		return -EINVAL;
1477 
1478 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1479 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1480 		if (!rdma_is_port_valid(device, port)) {
1481 			ret = -EINVAL;
1482 			goto err;
1483 		}
1484 	}
1485 
1486 	if ((port && fe->flags & NLDEV_PER_DEV) ||
1487 	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
1488 		ret = -EINVAL;
1489 		goto err;
1490 	}
1491 
1492 	id = nla_get_u32(tb[fe->id]);
1493 	res = rdma_restrack_get_byid(device, res_type, id);
1494 	if (IS_ERR(res)) {
1495 		ret = PTR_ERR(res);
1496 		goto err;
1497 	}
1498 
1499 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1500 	if (!msg) {
1501 		ret = -ENOMEM;
1502 		goto err_get;
1503 	}
1504 
1505 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1506 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1507 					 RDMA_NL_GET_OP(nlh->nlmsg_type)),
1508 			0, 0);
1509 
1510 	if (!nlh || fill_nldev_handle(msg, device)) {
1511 		ret = -EMSGSIZE;
1512 		goto err_free;
1513 	}
1514 
1515 	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
1516 
1517 	ret = fill_func(msg, has_cap_net_admin, res, port);
1518 	if (ret)
1519 		goto err_free;
1520 
1521 	rdma_restrack_put(res);
1522 	nlmsg_end(msg, nlh);
1523 	ib_device_put(device);
1524 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1525 
1526 err_free:
1527 	nlmsg_free(msg);
1528 err_get:
1529 	rdma_restrack_put(res);
1530 err:
1531 	ib_device_put(device);
1532 	return ret;
1533 }
1534 
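/*
 * Common handler for dumping all restrack objects of one type on a
 * device, resuming from cb->args[0] on each netlink dump iteration.
 */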
1535 static int res_get_common_dumpit(struct sk_buff *skb,
1536 				 struct netlink_callback *cb,
1537 				 enum rdma_restrack_type res_type,
1538 				 res_fill_func_t fill_func)
1539 {
1540 	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
1541 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1542 	struct rdma_restrack_entry *res;
1543 	struct rdma_restrack_root *rt;
1544 	int err, ret = 0, idx = 0;
1545 	bool show_details = false;
1546 	struct nlattr *table_attr;
1547 	struct nlattr *entry_attr;
1548 	struct ib_device *device;
1549 	int start = cb->args[0];
1550 	bool has_cap_net_admin;
1551 	struct nlmsghdr *nlh;
1552 	unsigned long id;
1553 	u32 index, port = 0;
1554 	bool filled = false;
1555 
1556 	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1557 				     nldev_policy, NULL);
1558 	/*
1559 	 * Right now we require the device index in order to return resource
1560 	 * information, but this code could be extended to return all devices
1561 	 * in one shot by checking for the presence of
1562 	 * RDMA_NLDEV_ATTR_DEV_INDEX: if it is absent, iterate over all devices.
1563 	 *
1564 	 * That is not needed for now.
1565 	 */
1566 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1567 		return -EINVAL;
1568 
1569 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1570 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1571 	if (!device)
1572 		return -EINVAL;
1573 
1574 	if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
1575 		show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
1576 
1577 	/*
1578 	 * If no PORT_INDEX is supplied, return all resources of this type on the device
1579 	 */
1580 	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1581 		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1582 		if (!rdma_is_port_valid(device, port)) {
1583 			ret = -EINVAL;
1584 			goto err_index;
1585 		}
1586 	}
1587 
1588 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1589 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1590 					 RDMA_NL_GET_OP(cb->nlh->nlmsg_type)),
1591 			0, NLM_F_MULTI);
1592 
1593 	if (!nlh || fill_nldev_handle(skb, device)) {
1594 		ret = -EMSGSIZE;
1595 		goto err;
1596 	}
1597 
1598 	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
1599 	if (!table_attr) {
1600 		ret = -EMSGSIZE;
1601 		goto err;
1602 	}
1603 
1604 	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);
1605 
1606 	rt = &device->res[res_type];
1607 	xa_lock(&rt->xa);
1608 	/*
1609 	 * FIXME: if skipping ahead is common, this loop should use
1610 	 * xas_for_each & xas_pause to optimize; we can have a lot of
1611 	 * objects.
1612 	 */
1613 	xa_for_each(&rt->xa, id, res) {
1614 		if (xa_get_mark(&rt->xa, res->id, RESTRACK_DD) && !show_details)
1615 			goto next;
1616 
1617 		if (idx < start || !rdma_restrack_get(res))
1618 			goto next;
1619 
1620 		xa_unlock(&rt->xa);
1621 
1622 		filled = true;
1623 
1624 		entry_attr = nla_nest_start_noflag(skb, fe->entry);
1625 		if (!entry_attr) {
1626 			ret = -EMSGSIZE;
1627 			rdma_restrack_put(res);
1628 			goto msg_full;
1629 		}
1630 
1631 		ret = fill_func(skb, has_cap_net_admin, res, port);
1632 
1633 		rdma_restrack_put(res);
1634 
1635 		if (ret) {
1636 			nla_nest_cancel(skb, entry_attr);
1637 			if (ret == -EMSGSIZE)
1638 				goto msg_full;
1639 			if (ret == -EAGAIN)
1640 				goto again;
1641 			goto res_err;
1642 		}
1643 		nla_nest_end(skb, entry_attr);
1644 again:		xa_lock(&rt->xa);
1645 next:		idx++;
1646 	}
1647 	xa_unlock(&rt->xa);
1648 
1649 msg_full:
1650 	nla_nest_end(skb, table_attr);
1651 	nlmsg_end(skb, nlh);
1652 	cb->args[0] = idx;
1653 
1654 	/*
1655 	 * No more entries to fill, cancel the message and
1656 	 * return 0 to mark end of dumpit.
1657 	 */
1658 	if (!filled)
1659 		goto err;
1660 
1661 	ib_device_put(device);
1662 	return skb->len;
1663 
1664 res_err:
1665 	nla_nest_cancel(skb, table_attr);
1666 
1667 err:
1668 	nlmsg_cancel(skb, nlh);
1669 
1670 err_index:
1671 	ib_device_put(device);
1672 	return ret;
1673 }
1674 
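/*
 * Generate the per-resource-type doit/dumpit handlers that wrap the
 * common helpers above with the matching fill_res_*_entry callback.
 */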
1675 #define RES_GET_FUNCS(name, type)                                              \
1676 	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
1677 						 struct netlink_callback *cb)  \
1678 	{                                                                      \
1679 		return res_get_common_dumpit(skb, cb, type,                    \
1680 					     fill_res_##name##_entry);         \
1681 	}                                                                      \
1682 	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
1683 					       struct nlmsghdr *nlh,           \
1684 					       struct netlink_ext_ack *extack) \
1685 	{                                                                      \
1686 		return res_get_common_doit(skb, nlh, extack, type,             \
1687 					   fill_res_##name##_entry);           \
1688 	}
1689 
1690 RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
1691 RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP);
1692 RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
1693 RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
1694 RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ);
1695 RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
1696 RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
1697 RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
1698 RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
1699 RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
1700 RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
1701 RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ);
1702 
1703 static LIST_HEAD(link_ops);
1704 static DECLARE_RWSEM(link_ops_rwsem);
1705 
1706 static const struct rdma_link_ops *link_ops_get(const char *type)
1707 {
1708 	const struct rdma_link_ops *ops;
1709 
1710 	list_for_each_entry(ops, &link_ops, list) {
1711 		if (!strcmp(ops->type, type))
1712 			goto out;
1713 	}
1714 	ops = NULL;
1715 out:
1716 	return ops;
1717 }
1718 
1719 void rdma_link_register(struct rdma_link_ops *ops)
1720 {
1721 	down_write(&link_ops_rwsem);
1722 	if (WARN_ON_ONCE(link_ops_get(ops->type)))
1723 		goto out;
1724 	list_add(&ops->list, &link_ops);
1725 out:
1726 	up_write(&link_ops_rwsem);
1727 }
1728 EXPORT_SYMBOL(rdma_link_register);
1729 
1730 void rdma_link_unregister(struct rdma_link_ops *ops)
1731 {
1732 	down_write(&link_ops_rwsem);
1733 	list_del(&ops->list);
1734 	up_write(&link_ops_rwsem);
1735 }
1736 EXPORT_SYMBOL(rdma_link_unregister);
1737 
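/*
 * Create a soft RDMA device of the given link type on top of a netdev,
 * loading the rdma-link-<type> module if the type is not yet registered.
 */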
1738 static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
1739 			  struct netlink_ext_ack *extack)
1740 {
1741 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1742 	char ibdev_name[IB_DEVICE_NAME_MAX];
1743 	const struct rdma_link_ops *ops;
1744 	char ndev_name[IFNAMSIZ];
1745 	struct net_device *ndev;
1746 	char type[IFNAMSIZ];
1747 	int err;
1748 
1749 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1750 				     nldev_policy, extack);
1751 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
1752 	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
1753 		return -EINVAL;
1754 
1755 	nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
1756 		    sizeof(ibdev_name));
1757 	if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
1758 		return -EINVAL;
1759 
1760 	nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
1761 	nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
1762 		    sizeof(ndev_name));
1763 
1764 	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
1765 	if (!ndev)
1766 		return -ENODEV;
1767 
1768 	down_read(&link_ops_rwsem);
1769 	ops = link_ops_get(type);
1770 #ifdef CONFIG_MODULES
1771 	if (!ops) {
1772 		up_read(&link_ops_rwsem);
1773 		request_module("rdma-link-%s", type);
1774 		down_read(&link_ops_rwsem);
1775 		ops = link_ops_get(type);
1776 	}
1777 #endif
1778 	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
1779 	up_read(&link_ops_rwsem);
1780 	dev_put(ndev);
1781 
1782 	return err;
1783 }
1784 
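/*
 * RDMA_NLDEV_CMD_DELLINK handler: unregister the device with the given
 * index, but only if its driver opted in via IBK_ALLOW_USER_UNREG.
 */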
1785 static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
1786 			  struct netlink_ext_ack *extack)
1787 {
1788 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1789 	struct ib_device *device;
1790 	u32 index;
1791 	int err;
1792 
1793 	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1794 				     nldev_policy, extack);
1795 	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1796 		return -EINVAL;
1797 
1798 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1799 	device = ib_device_get_by_index(sock_net(skb->sk), index);
1800 	if (!device)
1801 		return -EINVAL;
1802 
1803 	if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) {
1804 		ib_device_put(device);
1805 		return -EINVAL;
1806 	}
1807 
1808 	ib_unregister_device_and_put(device);
1809 	return 0;
1810 }
1811 
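/*
 * RDMA_NLDEV_CMD_GET_CHARDEV handler: report the char device (dev_t,
 * ABI version and name) that a client such as "uverbs" or "umad"
 * exposes for the given device/port, or the client's global chardev
 * when no device index is supplied.
 */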
1812 static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
1813 			     struct netlink_ext_ack *extack)
1814 {
1815 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1816 	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
1817 	struct ib_client_nl_info data = {};
1818 	struct ib_device *ibdev = NULL;
1819 	struct sk_buff *msg;
1820 	u32 index;
1821 	int err;
1822 
1823 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
1824 			  extack);
1825 	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
1826 		return -EINVAL;
1827 
1828 	nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
1829 		    sizeof(client_name));
1830 
1831 	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
1832 		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1833 		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
1834 		if (!ibdev)
1835 			return -EINVAL;
1836 
1837 		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1838 			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1839 			if (!rdma_is_port_valid(ibdev, data.port)) {
1840 				err = -EINVAL;
1841 				goto out_put;
1842 			}
1843 		} else {
1844 			data.port = -1;
1845 		}
1846 	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
1847 		return -EINVAL;
1848 	}
1849 
1850 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1851 	if (!msg) {
1852 		err = -ENOMEM;
1853 		goto out_put;
1854 	}
1855 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1856 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1857 					 RDMA_NLDEV_CMD_GET_CHARDEV),
1858 			0, 0);
1859 	if (!nlh) {
1860 		err = -EMSGSIZE;
1861 		goto out_nlmsg;
1862 	}
1863 
1864 	data.nl_msg = msg;
1865 	err = ib_get_client_nl_info(ibdev, client_name, &data);
1866 	if (err)
1867 		goto out_nlmsg;
1868 
1869 	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
1870 				huge_encode_dev(data.cdev->devt),
1871 				RDMA_NLDEV_ATTR_PAD);
1872 	if (err)
1873 		goto out_data;
1874 	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
1875 				RDMA_NLDEV_ATTR_PAD);
1876 	if (err)
1877 		goto out_data;
1878 	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
1879 			   dev_name(data.cdev))) {
1880 		err = -EMSGSIZE;
1881 		goto out_data;
1882 	}
1883 
1884 	nlmsg_end(msg, nlh);
1885 	put_device(data.cdev);
1886 	if (ibdev)
1887 		ib_device_put(ibdev);
1888 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1889 
1890 out_data:
1891 	put_device(data.cdev);
1892 out_nlmsg:
1893 	nlmsg_free(msg);
1894 out_put:
1895 	if (ibdev)
1896 		ib_device_put(ibdev);
1897 	return err;
1898 }
1899 
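/*
 * RDMA_NLDEV_CMD_SYS_GET handler: report global settings (netns mode,
 * privileged-qkey mode, copy-on-fork support), cf. "rdma system show".
 */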
1900 static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1901 			      struct netlink_ext_ack *extack)
1902 {
1903 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1904 	struct sk_buff *msg;
1905 	int err;
1906 
1907 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1908 			  nldev_policy, extack);
1909 	if (err)
1910 		return err;
1911 
1912 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1913 	if (!msg)
1914 		return -ENOMEM;
1915 
1916 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1917 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1918 					 RDMA_NLDEV_CMD_SYS_GET),
1919 			0, 0);
1920 	if (!nlh) {
1921 		nlmsg_free(msg);
1922 		return -EMSGSIZE;
1923 	}
1924 
1925 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1926 			 (u8)ib_devices_shared_netns);
1927 	if (err) {
1928 		nlmsg_free(msg);
1929 		return err;
1930 	}
1931 
1932 	err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE,
1933 			 (u8)privileged_qkey);
1934 	if (err) {
1935 		nlmsg_free(msg);
1936 		return err;
1937 	}
1938 	/*
1939 	 * Copy-on-fork is supported.
1940 	 * See commits:
1941 	 * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
1942 	 * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
1943 	 * for more details. Don't backport this without them.
1944 	 *
1945 	 * Return value ignored on purpose, assume copy-on-fork is not
1946 	 * supported in case of failure.
1947 	 */
1948 	nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);
1949 
1950 	nlmsg_end(msg, nlh);
1951 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1952 }
1953 
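/* Switch shared/exclusive sharing of RDMA devices among net namespaces. */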
1954 static int nldev_set_sys_set_netns_doit(struct nlattr *tb[])
1955 {
1956 	u8 enable;
1957 	int err;
1958 
1959 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
1960 	/* Only 0 and 1 are supported */
1961 	if (enable > 1)
1962 		return -EINVAL;
1963 
1964 	err = rdma_compatdev_set(enable);
1965 	return err;
1966 }
1967 
1968 static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[])
1969 {
1970 	u8 enable;
1971 
1972 	enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]);
1973 	/* Only 0 and 1 are supported */
1974 	if (enable > 1)
1975 		return -EINVAL;
1976 
1977 	privileged_qkey = enable;
1978 	return 0;
1979 }
1980 
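/*
 * RDMA_NLDEV_CMD_SYS_SET handler: dispatch to the netns or
 * privileged-qkey setter depending on which attribute is present.
 */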
1981 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1982 				  struct netlink_ext_ack *extack)
1983 {
1984 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1985 	int err;
1986 
1987 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1988 			  nldev_policy, extack);
1989 	if (err)
1990 		return -EINVAL;
1991 
1992 	if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
1993 		return nldev_set_sys_set_netns_doit(tb);
1994 
1995 	if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE])
1996 		return nldev_set_sys_set_pqkey_doit(tb);
1997 
1998 	return -EINVAL;
1999 }
2000 
2001 
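/*
 * Set the per-port QP counter mode: either enable auto mode with an
 * optional mask, or bind the given LQPN to an existing or newly
 * allocated counter and report the counter ID in the reply.
 */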
2002 static int nldev_stat_set_mode_doit(struct sk_buff *msg,
2003 				    struct netlink_ext_ack *extack,
2004 				    struct nlattr *tb[],
2005 				    struct ib_device *device, u32 port)
2006 {
2007 	u32 mode, mask = 0, qpn, cntn = 0;
2008 	int ret;
2009 
2010 	/* Currently only counter for QP is supported */
2011 	if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
2012 	    nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
2013 		return -EINVAL;
2014 
2015 	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
2016 	if (mode == RDMA_COUNTER_MODE_AUTO) {
2017 		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
2018 			mask = nla_get_u32(
2019 				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
2020 		return rdma_counter_set_auto_mode(device, port, mask, extack);
2021 	}
2022 
2023 	if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
2024 		return -EINVAL;
2025 
2026 	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
2027 	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
2028 		cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
2029 		ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
2030 		if (ret)
2031 			return ret;
2032 	} else {
2033 		ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
2034 		if (ret)
2035 			return ret;
2036 	}
2037 
2038 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
2039 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
2040 		ret = -EMSGSIZE;
2041 		goto err_fill;
2042 	}
2043 
2044 	return 0;
2045 
2046 err_fill:
2047 	rdma_counter_unbind_qpn(device, port, qpn, cntn);
2048 	return ret;
2049 }
2050 
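/*
 * Enable/disable optional (dynamic) hw counters on a port: the nested
 * RDMA_NLDEV_ATTR_STAT_HWCOUNTERS list selects the counters to enable;
 * optional counters not listed are disabled.
 */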
2051 static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
2052 					       struct ib_device *device,
2053 					       u32 port)
2054 {
2055 	struct rdma_hw_stats *stats;
2056 	struct nlattr *entry_attr;
2057 	unsigned long *target;
2058 	int rem, i, ret = 0;
2059 	u32 index;
2060 
2061 	stats = ib_get_hw_stats_port(device, port);
2062 	if (!stats)
2063 		return -EINVAL;
2064 
2065 	target = kcalloc(BITS_TO_LONGS(stats->num_counters),
2066 			 sizeof(*stats->is_disabled), GFP_KERNEL);
2067 	if (!target)
2068 		return -ENOMEM;
2069 
2070 	nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS],
2071 			    rem) {
2072 		index = nla_get_u32(entry_attr);
2073 		if ((index >= stats->num_counters) ||
2074 		    !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) {
2075 			ret = -EINVAL;
2076 			goto out;
2077 		}
2078 
2079 		set_bit(index, target);
2080 	}
2081 
2082 	for (i = 0; i < stats->num_counters; i++) {
2083 		if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL))
2084 			continue;
2085 
2086 		ret = rdma_counter_modify(device, port, i, test_bit(i, target));
2087 		if (ret)
2088 			goto out;
2089 	}
2090 
2091 out:
2092 	kfree(target);
2093 	return ret;
2094 }
2095 
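/*
 * RDMA_NLDEV_CMD_STAT_SET handler (e.g. iproute2's "rdma statistic"
 * set/bind subcommands): requires device and port plus at least one of
 * STAT_MODE or STAT_HWCOUNTERS, then applies the mode and/or dynamic
 * counter changes.
 */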
2096 static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2097 			       struct netlink_ext_ack *extack)
2098 {
2099 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2100 	struct ib_device *device;
2101 	struct sk_buff *msg;
2102 	u32 index, port;
2103 	int ret;
2104 
2105 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
2106 			  extack);
2107 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
2108 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
2109 		return -EINVAL;
2110 
2111 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
2112 	device = ib_device_get_by_index(sock_net(skb->sk), index);
2113 	if (!device)
2114 		return -EINVAL;
2115 
2116 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
2117 	if (!rdma_is_port_valid(device, port)) {
2118 		ret = -EINVAL;
2119 		goto err_put_device;
2120 	}
2121 
2122 	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] &&
2123 	    !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
2124 		ret = -EINVAL;
2125 		goto err_put_device;
2126 	}
2127 
2128 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2129 	if (!msg) {
2130 		ret = -ENOMEM;
2131 		goto err_put_device;
2132 	}
2133 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
2134 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
2135 					 RDMA_NLDEV_CMD_STAT_SET),
2136 			0, 0);
2137 	if (!nlh || fill_nldev_handle(msg, device) ||
2138 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
2139 		ret = -EMSGSIZE;
2140 		goto err_free_msg;
2141 	}
2142 
2143 	if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) {
2144 		ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port);
2145 		if (ret)
2146 			goto err_free_msg;
2147 	}
2148 
2149 	if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
2150 		ret = nldev_stat_set_counter_dynamic_doit(tb, device, port);
2151 		if (ret)
2152 			goto err_free_msg;
2153 	}
2154 
2155 	nlmsg_end(msg, nlh);
2156 	ib_device_put(device);
2157 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
2158 
2159 err_free_msg:
2160 	nlmsg_free(msg);
2161 err_put_device:
2162 	ib_device_put(device);
2163 	return ret;
2164 }
2165 
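/*
 * RDMA_NLDEV_CMD_STAT_DEL handler: unbind a QP (by LQPN) from a bound
 * counter and echo the handles back to the caller.
 */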
2166 static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2167 			       struct netlink_ext_ack *extack)
2168 {
2169 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2170 	struct ib_device *device;
2171 	struct sk_buff *msg;
2172 	u32 index, port, qpn, cntn;
2173 	int ret;
2174 
2175 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2176 			  nldev_policy, extack);
2177 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
2178 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
2179 	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
2180 	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
2181 		return -EINVAL;
2182 
2183 	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
2184 		return -EINVAL;
2185 
2186 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
2187 	device = ib_device_get_by_index(sock_net(skb->sk), index);
2188 	if (!device)
2189 		return -EINVAL;
2190 
2191 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
2192 	if (!rdma_is_port_valid(device, port)) {
2193 		ret = -EINVAL;
2194 		goto err;
2195 	}
2196 
2197 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2198 	if (!msg) {
2199 		ret = -ENOMEM;
2200 		goto err;
2201 	}
2202 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
2203 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
2204 					 RDMA_NLDEV_CMD_STAT_SET),
2205 			0, 0);
2206 	if (!nlh) {
2207 		ret = -EMSGSIZE;
2208 		goto err_fill;
2209 	}
2210 
2211 	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
2212 	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
2213 	if (fill_nldev_handle(msg, device) ||
2214 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
2215 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
2216 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
2217 		ret = -EMSGSIZE;
2218 		goto err_fill;
2219 	}
2220 
2221 	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
2222 	if (ret)
2223 		goto err_fill;
2224 
2225 	nlmsg_end(msg, nlh);
2226 	ib_device_put(device);
2227 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
2228 
2229 err_fill:
2230 	nlmsg_free(msg);
2231 err:
2232 	ib_device_put(device);
2233 	return ret;
2234 }
2235 
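/*
 * Dump the port-wide hw counters: refresh them under stats->lock via
 * get_hw_stats() and emit one name/value entry per enabled counter,
 * adding in values accumulated on per-QP bound counters.
 */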
2236 static int stat_get_doit_default_counter(struct sk_buff *skb,
2237 					 struct nlmsghdr *nlh,
2238 					 struct netlink_ext_ack *extack,
2239 					 struct nlattr *tb[])
2240 {
2241 	struct rdma_hw_stats *stats;
2242 	struct nlattr *table_attr;
2243 	struct ib_device *device;
2244 	int ret, num_cnts, i;
2245 	struct sk_buff *msg;
2246 	u32 index, port;
2247 	u64 v;
2248 
2249 	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
2250 		return -EINVAL;
2251 
2252 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
2253 	device = ib_device_get_by_index(sock_net(skb->sk), index);
2254 	if (!device)
2255 		return -EINVAL;
2256 
2257 	if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) {
2258 		ret = -EINVAL;
2259 		goto err;
2260 	}
2261 
2262 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
2263 	stats = ib_get_hw_stats_port(device, port);
2264 	if (!stats) {
2265 		ret = -EINVAL;
2266 		goto err;
2267 	}
2268 
2269 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2270 	if (!msg) {
2271 		ret = -ENOMEM;
2272 		goto err;
2273 	}
2274 
2275 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
2276 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
2277 					 RDMA_NLDEV_CMD_STAT_GET),
2278 			0, 0);
2279 
2280 	if (!nlh || fill_nldev_handle(msg, device) ||
2281 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
2282 		ret = -EMSGSIZE;
2283 		goto err_msg;
2284 	}
2285 
2286 	mutex_lock(&stats->lock);
2287 
2288 	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
2289 	if (num_cnts < 0) {
2290 		ret = -EINVAL;
2291 		goto err_stats;
2292 	}
2293 
2294 	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
2295 	if (!table_attr) {
2296 		ret = -EMSGSIZE;
2297 		goto err_stats;
2298 	}
2299 	for (i = 0; i < num_cnts; i++) {
2300 		if (test_bit(i, stats->is_disabled))
2301 			continue;
2302 
2303 		v = stats->value[i] +
2304 			rdma_counter_get_hwstat_value(device, port, i);
2305 		if (rdma_nl_stat_hwcounter_entry(msg,
2306 						 stats->descs[i].name, v)) {
2307 			ret = -EMSGSIZE;
2308 			goto err_table;
2309 		}
2310 	}
2311 	nla_nest_end(msg, table_attr);
2312 
2313 	mutex_unlock(&stats->lock);
2314 	nlmsg_end(msg, nlh);
2315 	ib_device_put(device);
2316 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
2317 
2318 err_table:
2319 	nla_nest_cancel(msg, table_attr);
2320 err_stats:
2321 	mutex_unlock(&stats->lock);
2322 err_msg:
2323 	nlmsg_free(msg);
2324 err:
2325 	ib_device_put(device);
2326 	return ret;
2327 }
2328 
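/*
 * Report the QP counter mode of a port (plus the auto-mode mask when in
 * auto mode); if STAT_COUNTER_ID is present this instead dumps that
 * counter through nldev_res_get_counter_doit().
 */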
2329 static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
2330 			    struct netlink_ext_ack *extack, struct nlattr *tb[])
2331 
2332 {
2333 	enum rdma_nl_counter_mode mode;
2334 	enum rdma_nl_counter_mask mask;
2335 	struct ib_device *device;
2336 	struct sk_buff *msg;
2337 	u32 index, port;
2338 	int ret;
2339 
2340 	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
2341 		return nldev_res_get_counter_doit(skb, nlh, extack);
2342 
2343 	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
2344 	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
2345 		return -EINVAL;
2346 
2347 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
2348 	device = ib_device_get_by_index(sock_net(skb->sk), index);
2349 	if (!device)
2350 		return -EINVAL;
2351 
2352 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
2353 	if (!rdma_is_port_valid(device, port)) {
2354 		ret = -EINVAL;
2355 		goto err;
2356 	}
2357 
2358 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2359 	if (!msg) {
2360 		ret = -ENOMEM;
2361 		goto err;
2362 	}
2363 
2364 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
2365 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
2366 					 RDMA_NLDEV_CMD_STAT_GET),
2367 			0, 0);
2368 	if (!nlh) {
2369 		ret = -EMSGSIZE;
2370 		goto err_msg;
2371 	}
2372 
2373 	ret = rdma_counter_get_mode(device, port, &mode, &mask);
2374 	if (ret)
2375 		goto err_msg;
2376 
2377 	if (fill_nldev_handle(msg, device) ||
2378 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
2379 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
2380 		ret = -EMSGSIZE;
2381 		goto err_msg;
2382 	}
2383 
2384 	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
2385 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
2386 		ret = -EMSGSIZE;
2387 		goto err_msg;
2388 	}
2389 
2390 	nlmsg_end(msg, nlh);
2391 	ib_device_put(device);
2392 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
2393 
2394 err_msg:
2395 	nlmsg_free(msg);
2396 err:
2397 	ib_device_put(device);
2398 	return ret;
2399 }
2400 
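/*
 * RDMA_NLDEV_CMD_STAT_GET doit: without STAT_RES report the default hw
 * counters, otherwise dispatch on the resource type (QP or MR).
 */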
2401 static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
2402 			       struct netlink_ext_ack *extack)
2403 {
2404 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2405 	int ret;
2406 
2407 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2408 			  nldev_policy, extack);
2409 	if (ret)
2410 		return -EINVAL;
2411 
2412 	if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
2413 		return stat_get_doit_default_counter(skb, nlh, extack, tb);
2414 
2415 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2416 	case RDMA_NLDEV_ATTR_RES_QP:
2417 		ret = stat_get_doit_qp(skb, nlh, extack, tb);
2418 		break;
2419 	case RDMA_NLDEV_ATTR_RES_MR:
2420 		ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
2421 					  fill_stat_mr_entry);
2422 		break;
2423 	default:
2424 		ret = -EINVAL;
2425 		break;
2426 	}
2427 
2428 	return ret;
2429 }
2430 
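/* Dump flavour of STAT_GET: per-QP counters or per-MR statistics. */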
2431 static int nldev_stat_get_dumpit(struct sk_buff *skb,
2432 				 struct netlink_callback *cb)
2433 {
2434 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2435 	int ret;
2436 
2437 	ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2438 			  nldev_policy, NULL);
2439 	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
2440 		return -EINVAL;
2441 
2442 	switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2443 	case RDMA_NLDEV_ATTR_RES_QP:
2444 		ret = nldev_res_get_counter_dumpit(skb, cb);
2445 		break;
2446 	case RDMA_NLDEV_ATTR_RES_MR:
2447 		ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
2448 					    fill_stat_mr_entry);
2449 		break;
2450 	default:
2451 		ret = -EINVAL;
2452 		break;
2453 	}
2454 
2455 	return ret;
2456 }
2457 
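/*
 * RDMA_NLDEV_CMD_STAT_GET_STATUS handler: list every hw counter of a
 * port with its index and, for optional counters, a flag showing
 * whether it is currently enabled.
 */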
2458 static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
2459 					      struct nlmsghdr *nlh,
2460 					      struct netlink_ext_ack *extack)
2461 {
2462 	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
2463 	struct rdma_hw_stats *stats;
2464 	struct ib_device *device;
2465 	struct sk_buff *msg;
2466 	u32 devid, port;
2467 	int ret, i;
2468 
2469 	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2470 			  nldev_policy, extack);
2471 	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
2472 	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
2473 		return -EINVAL;
2474 
2475 	devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
2476 	device = ib_device_get_by_index(sock_net(skb->sk), devid);
2477 	if (!device)
2478 		return -EINVAL;
2479 
2480 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
2481 	if (!rdma_is_port_valid(device, port)) {
2482 		ret = -EINVAL;
2483 		goto err;
2484 	}
2485 
2486 	stats = ib_get_hw_stats_port(device, port);
2487 	if (!stats) {
2488 		ret = -EINVAL;
2489 		goto err;
2490 	}
2491 
2492 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2493 	if (!msg) {
2494 		ret = -ENOMEM;
2495 		goto err;
2496 	}
2497 
2498 	nlh = nlmsg_put(
2499 		msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
2500 		RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS),
2501 		0, 0);
2502 
2503 	ret = -EMSGSIZE;
2504 	if (!nlh || fill_nldev_handle(msg, device) ||
2505 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
2506 		goto err_msg;
2507 
2508 	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
2509 	if (!table)
2510 		goto err_msg;
2511 
2512 	mutex_lock(&stats->lock);
2513 	for (i = 0; i < stats->num_counters; i++) {
2514 		entry = nla_nest_start(msg,
2515 				       RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
2516 		if (!entry)
2517 			goto err_msg_table;
2518 
2519 		if (nla_put_string(msg,
2520 				   RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
2521 				   stats->descs[i].name) ||
2522 		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
2523 			goto err_msg_entry;
2524 
2525 		if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
2526 		    (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
2527 				!test_bit(i, stats->is_disabled))))
2528 			goto err_msg_entry;
2529 
2530 		nla_nest_end(msg, entry);
2531 	}
2532 	mutex_unlock(&stats->lock);
2533 
2534 	nla_nest_end(msg, table);
2535 	nlmsg_end(msg, nlh);
2536 	ib_device_put(device);
2537 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
2538 
2539 err_msg_entry:
2540 	nla_nest_cancel(msg, entry);
2541 err_msg_table:
2542 	mutex_unlock(&stats->lock);
2543 	nla_nest_cancel(msg, table);
2544 err_msg:
2545 	nlmsg_free(msg);
2546 err:
2547 	ib_device_put(device);
2548 	return ret;
2549 }
2550 
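/*
 * Dispatch table for RDMA_NL_NLDEV requests; entries flagged with
 * RDMA_NL_ADMIN_PERM are restricted to users with CAP_NET_ADMIN.
 */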
2551 static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
2552 	[RDMA_NLDEV_CMD_GET] = {
2553 		.doit = nldev_get_doit,
2554 		.dump = nldev_get_dumpit,
2555 	},
2556 	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
2557 		.doit = nldev_get_chardev,
2558 	},
2559 	[RDMA_NLDEV_CMD_SET] = {
2560 		.doit = nldev_set_doit,
2561 		.flags = RDMA_NL_ADMIN_PERM,
2562 	},
2563 	[RDMA_NLDEV_CMD_NEWLINK] = {
2564 		.doit = nldev_newlink,
2565 		.flags = RDMA_NL_ADMIN_PERM,
2566 	},
2567 	[RDMA_NLDEV_CMD_DELLINK] = {
2568 		.doit = nldev_dellink,
2569 		.flags = RDMA_NL_ADMIN_PERM,
2570 	},
2571 	[RDMA_NLDEV_CMD_PORT_GET] = {
2572 		.doit = nldev_port_get_doit,
2573 		.dump = nldev_port_get_dumpit,
2574 	},
2575 	[RDMA_NLDEV_CMD_RES_GET] = {
2576 		.doit = nldev_res_get_doit,
2577 		.dump = nldev_res_get_dumpit,
2578 	},
2579 	[RDMA_NLDEV_CMD_RES_QP_GET] = {
2580 		.doit = nldev_res_get_qp_doit,
2581 		.dump = nldev_res_get_qp_dumpit,
2582 	},
2583 	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
2584 		.doit = nldev_res_get_cm_id_doit,
2585 		.dump = nldev_res_get_cm_id_dumpit,
2586 	},
2587 	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
2588 		.doit = nldev_res_get_cq_doit,
2589 		.dump = nldev_res_get_cq_dumpit,
2590 	},
2591 	[RDMA_NLDEV_CMD_RES_MR_GET] = {
2592 		.doit = nldev_res_get_mr_doit,
2593 		.dump = nldev_res_get_mr_dumpit,
2594 	},
2595 	[RDMA_NLDEV_CMD_RES_PD_GET] = {
2596 		.doit = nldev_res_get_pd_doit,
2597 		.dump = nldev_res_get_pd_dumpit,
2598 	},
2599 	[RDMA_NLDEV_CMD_RES_CTX_GET] = {
2600 		.doit = nldev_res_get_ctx_doit,
2601 		.dump = nldev_res_get_ctx_dumpit,
2602 	},
2603 	[RDMA_NLDEV_CMD_RES_SRQ_GET] = {
2604 		.doit = nldev_res_get_srq_doit,
2605 		.dump = nldev_res_get_srq_dumpit,
2606 	},
2607 	[RDMA_NLDEV_CMD_SYS_GET] = {
2608 		.doit = nldev_sys_get_doit,
2609 	},
2610 	[RDMA_NLDEV_CMD_SYS_SET] = {
2611 		.doit = nldev_set_sys_set_doit,
2612 		.flags = RDMA_NL_ADMIN_PERM,
2613 	},
2614 	[RDMA_NLDEV_CMD_STAT_SET] = {
2615 		.doit = nldev_stat_set_doit,
2616 		.flags = RDMA_NL_ADMIN_PERM,
2617 	},
2618 	[RDMA_NLDEV_CMD_STAT_GET] = {
2619 		.doit = nldev_stat_get_doit,
2620 		.dump = nldev_stat_get_dumpit,
2621 	},
2622 	[RDMA_NLDEV_CMD_STAT_DEL] = {
2623 		.doit = nldev_stat_del_doit,
2624 		.flags = RDMA_NL_ADMIN_PERM,
2625 	},
2626 	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
2627 		.doit = nldev_res_get_qp_raw_doit,
2628 		.dump = nldev_res_get_qp_raw_dumpit,
2629 		.flags = RDMA_NL_ADMIN_PERM,
2630 	},
2631 	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
2632 		.doit = nldev_res_get_cq_raw_doit,
2633 		.dump = nldev_res_get_cq_raw_dumpit,
2634 		.flags = RDMA_NL_ADMIN_PERM,
2635 	},
2636 	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
2637 		.doit = nldev_res_get_mr_raw_doit,
2638 		.dump = nldev_res_get_mr_raw_dumpit,
2639 		.flags = RDMA_NL_ADMIN_PERM,
2640 	},
2641 	[RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = {
2642 		.doit = nldev_res_get_srq_raw_doit,
2643 		.dump = nldev_res_get_srq_raw_dumpit,
2644 		.flags = RDMA_NL_ADMIN_PERM,
2645 	},
2646 	[RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
2647 		.doit = nldev_stat_get_counter_status_doit,
2648 	},
2649 };
2650 
2651 void __init nldev_init(void)
2652 {
2653 	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
2654 }
2655 
2656 void nldev_exit(void)
2657 {
2658 	rdma_nl_unregister(RDMA_NL_NLDEV);
2659 }
2660 
2661 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
2662