1 /* 2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Neither the names of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU General Public License ("GPL") version 2 as published by the Free 18 * Software Foundation. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <linux/module.h> 34 #include <linux/pid.h> 35 #include <linux/pid_namespace.h> 36 #include <linux/mutex.h> 37 #include <net/netlink.h> 38 #include <rdma/rdma_cm.h> 39 #include <rdma/rdma_netlink.h> 40 41 #include "core_priv.h" 42 #include "cma_priv.h" 43 #include "restrack.h" 44 #include "uverbs.h" 45 46 /* 47 * This controls whether a non-privileged user is allowed to specify a 48 * controlled QKEY; when true, a non-privileged user may specify a 49 * controlled QKEY.
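 *
 * Usage sketch (illustrative note, not part of the original comment): userspace
 * flips this mode by sending the RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE
 * attribute, which is handled by nldev_set_sys_set_pqkey_doit() below; with a
 * sufficiently recent iproute2 this is typically done with
 * "rdma system set privileged-qkey on" (the exact command depends on the
 * iproute2 version).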
50 */ 51 static bool privileged_qkey; 52 53 typedef int (*res_fill_func_t)(struct sk_buff*, bool, 54 struct rdma_restrack_entry*, uint32_t); 55 56 /* 57 * Sort array elements by the netlink attribute name 58 */ 59 static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { 60 [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 }, 61 [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 }, 62 [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING, 63 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 64 [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING, 65 .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE }, 66 [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 }, 67 [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 }, 68 [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, 69 .len = IB_DEVICE_NAME_MAX }, 70 [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 }, 71 [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING, 72 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 73 [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, 74 [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, 75 [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, 76 [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, 77 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 78 [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, 79 [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, 80 [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, 81 [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, 82 [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING, 83 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 84 [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 }, 85 [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING, 86 .len = IFNAMSIZ }, 87 [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 }, 88 [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, 89 [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, 90 .len = IFNAMSIZ }, 91 [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 }, 92 [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 }, 93 [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 }, 94 [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 }, 95 [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED }, 96 [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 }, 97 [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED }, 98 [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED }, 99 [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, 100 [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, 101 [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, 102 [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED }, 103 [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, 104 [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED }, 105 [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { 106 .len = sizeof(struct __kernel_sockaddr_storage) }, 107 [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, 108 [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING, 109 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 110 [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 }, 111 [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 }, 112 [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 }, 113 [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED }, 114 [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 }, 115 [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 }, 116 [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED }, 117 [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 }, 118 [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED }, 119 [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 }, 120 [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED }, 121 
[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 }, 122 [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 }, 123 [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, 124 [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, 125 [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, 126 [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, 127 [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, 128 [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, 129 [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, 130 [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 }, 131 [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = { 132 .len = sizeof(struct __kernel_sockaddr_storage) }, 133 [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 }, 134 [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED }, 135 [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED }, 136 [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 }, 137 [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING, 138 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 139 [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, 140 [RDMA_NLDEV_ATTR_RES_SUBTYPE] = { .type = NLA_NUL_STRING, 141 .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, 142 [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, 143 [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, 144 [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED }, 145 [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 }, 146 [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED }, 147 [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 }, 148 [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 }, 149 [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, 150 [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, 151 [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 }, 152 [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 }, 153 [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 }, 154 [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED }, 155 [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED }, 156 [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 }, 157 [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED }, 158 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED }, 159 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING }, 160 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 }, 161 [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 }, 162 [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 }, 163 [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, 164 [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, 165 [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 }, 166 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 }, 167 [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 }, 168 [RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 }, 169 [RDMA_NLDEV_ATTR_DRIVER_DETAILS] = { .type = NLA_U8 }, 170 [RDMA_NLDEV_ATTR_DEV_TYPE] = { .type = NLA_U8 }, 171 [RDMA_NLDEV_ATTR_PARENT_NAME] = { .type = NLA_NUL_STRING }, 172 [RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE] = { .type = NLA_U8 }, 173 [RDMA_NLDEV_ATTR_EVENT_TYPE] = { .type = NLA_U8 }, 174 }; 175 176 static int put_driver_name_print_type(struct sk_buff *msg, const char *name, 177 enum rdma_nldev_print_type print_type) 178 { 179 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) 180 return -EMSGSIZE; 181 if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && 182 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) 183 return -EMSGSIZE; 184 185 return 0; 186 } 187 188 static int 
_rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, 189 enum rdma_nldev_print_type print_type, 190 u32 value) 191 { 192 if (put_driver_name_print_type(msg, name, print_type)) 193 return -EMSGSIZE; 194 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) 195 return -EMSGSIZE; 196 197 return 0; 198 } 199 200 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, 201 enum rdma_nldev_print_type print_type, 202 u64 value) 203 { 204 if (put_driver_name_print_type(msg, name, print_type)) 205 return -EMSGSIZE; 206 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, 207 RDMA_NLDEV_ATTR_PAD)) 208 return -EMSGSIZE; 209 210 return 0; 211 } 212 213 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, 214 const char *str) 215 { 216 if (put_driver_name_print_type(msg, name, 217 RDMA_NLDEV_PRINT_TYPE_UNSPEC)) 218 return -EMSGSIZE; 219 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) 220 return -EMSGSIZE; 221 222 return 0; 223 } 224 EXPORT_SYMBOL(rdma_nl_put_driver_string); 225 226 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) 227 { 228 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, 229 value); 230 } 231 EXPORT_SYMBOL(rdma_nl_put_driver_u32); 232 233 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, 234 u32 value) 235 { 236 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, 237 value); 238 } 239 EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); 240 241 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) 242 { 243 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, 244 value); 245 } 246 EXPORT_SYMBOL(rdma_nl_put_driver_u64); 247 248 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) 249 { 250 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, 251 value); 252 } 253 EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); 254 255 bool rdma_nl_get_privileged_qkey(void) 256 { 257 return privileged_qkey || capable(CAP_NET_RAW); 258 } 259 EXPORT_SYMBOL(rdma_nl_get_privileged_qkey); 260 261 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) 262 { 263 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) 264 return -EMSGSIZE; 265 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, 266 dev_name(&device->dev))) 267 return -EMSGSIZE; 268 269 return 0; 270 } 271 272 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) 273 { 274 char fw[IB_FW_VERSION_NAME_MAX]; 275 int ret = 0; 276 u32 port; 277 278 if (fill_nldev_handle(msg, device)) 279 return -EMSGSIZE; 280 281 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) 282 return -EMSGSIZE; 283 284 BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); 285 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, 286 device->attrs.device_cap_flags, 287 RDMA_NLDEV_ATTR_PAD)) 288 return -EMSGSIZE; 289 290 ib_get_device_fw_str(device, fw); 291 /* Device without FW has strlen(fw) = 0 */ 292 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) 293 return -EMSGSIZE; 294 295 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, 296 be64_to_cpu(device->node_guid), 297 RDMA_NLDEV_ATTR_PAD)) 298 return -EMSGSIZE; 299 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, 300 be64_to_cpu(device->attrs.sys_image_guid), 301 RDMA_NLDEV_ATTR_PAD)) 302 return -EMSGSIZE; 303 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) 304 
return -EMSGSIZE; 305 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) 306 return -EMSGSIZE; 307 308 if (device->type && 309 nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_TYPE, device->type)) 310 return -EMSGSIZE; 311 312 if (device->parent && 313 nla_put_string(msg, RDMA_NLDEV_ATTR_PARENT_NAME, 314 dev_name(&device->parent->dev))) 315 return -EMSGSIZE; 316 317 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE, 318 device->name_assign_type)) 319 return -EMSGSIZE; 320 321 /* 322 * The link type is determined from the first port. An mlx4 device can 323 * potentially have two different link types for the same IB device; 324 * that situation is considered better avoided in the future. 325 */ 326 port = rdma_start_port(device); 327 if (rdma_cap_opa_mad(device, port)) 328 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); 329 else if (rdma_protocol_ib(device, port)) 330 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); 331 else if (rdma_protocol_iwarp(device, port)) 332 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); 333 else if (rdma_protocol_roce(device, port)) 334 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); 335 else if (rdma_protocol_usnic(device, port)) 336 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, 337 "usnic"); 338 return ret; 339 } 340 341 static int fill_port_info(struct sk_buff *msg, 342 struct ib_device *device, u32 port, 343 const struct net *net) 344 { 345 struct net_device *netdev = NULL; 346 struct ib_port_attr attr; 347 int ret; 348 u64 cap_flags = 0; 349 350 if (fill_nldev_handle(msg, device)) 351 return -EMSGSIZE; 352 353 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) 354 return -EMSGSIZE; 355 356 ret = ib_query_port(device, port, &attr); 357 if (ret) 358 return ret; 359 360 if (rdma_protocol_ib(device, port)) { 361 BUILD_BUG_ON((sizeof(attr.port_cap_flags) + 362 sizeof(attr.port_cap_flags2)) > sizeof(u64)); 363 cap_flags = attr.port_cap_flags | 364 ((u64)attr.port_cap_flags2 << 32); 365 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, 366 cap_flags, RDMA_NLDEV_ATTR_PAD)) 367 return -EMSGSIZE; 368 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, 369 attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) 370 return -EMSGSIZE; 371 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) 372 return -EMSGSIZE; 373 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) 374 return -EMSGSIZE; 375 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) 376 return -EMSGSIZE; 377 } 378 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) 379 return -EMSGSIZE; 380 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) 381 return -EMSGSIZE; 382 383 netdev = ib_device_get_netdev(device, port); 384 if (netdev && net_eq(dev_net(netdev), net)) { 385 ret = nla_put_u32(msg, 386 RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); 387 if (ret) 388 goto out; 389 ret = nla_put_string(msg, 390 RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); 391 } 392 393 out: 394 dev_put(netdev); 395 return ret; 396 } 397 398 static int fill_res_info_entry(struct sk_buff *msg, 399 const char *name, u64 curr) 400 { 401 struct nlattr *entry_attr; 402 403 entry_attr = nla_nest_start_noflag(msg, 404 RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); 405 if (!entry_attr) 406 return -EMSGSIZE; 407 408 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) 409 goto err; 410 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 411 RDMA_NLDEV_ATTR_PAD)) 412 goto err; 413 414 nla_nest_end(msg, entry_attr); 415 return
0; 416 417 err: 418 nla_nest_cancel(msg, entry_attr); 419 return -EMSGSIZE; 420 } 421 422 static int fill_res_info(struct sk_buff *msg, struct ib_device *device, 423 bool show_details) 424 { 425 static const char * const names[RDMA_RESTRACK_MAX] = { 426 [RDMA_RESTRACK_PD] = "pd", 427 [RDMA_RESTRACK_CQ] = "cq", 428 [RDMA_RESTRACK_QP] = "qp", 429 [RDMA_RESTRACK_CM_ID] = "cm_id", 430 [RDMA_RESTRACK_MR] = "mr", 431 [RDMA_RESTRACK_CTX] = "ctx", 432 [RDMA_RESTRACK_SRQ] = "srq", 433 }; 434 435 struct nlattr *table_attr; 436 int ret, i, curr; 437 438 if (fill_nldev_handle(msg, device)) 439 return -EMSGSIZE; 440 441 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); 442 if (!table_attr) 443 return -EMSGSIZE; 444 445 for (i = 0; i < RDMA_RESTRACK_MAX; i++) { 446 if (!names[i]) 447 continue; 448 curr = rdma_restrack_count(device, i, show_details); 449 ret = fill_res_info_entry(msg, names[i], curr); 450 if (ret) 451 goto err; 452 } 453 454 nla_nest_end(msg, table_attr); 455 return 0; 456 457 err: 458 nla_nest_cancel(msg, table_attr); 459 return ret; 460 } 461 462 static int fill_res_name_pid(struct sk_buff *msg, 463 struct rdma_restrack_entry *res) 464 { 465 int err = 0; 466 467 /* 468 * For user resources, the user should read /proc/PID/comm to get the 469 * name of the task. 470 */ 471 if (rdma_is_kernel_res(res)) { 472 err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, 473 res->kern_name); 474 } else { 475 pid_t pid; 476 477 pid = task_pid_vnr(res->task); 478 /* 479 * A zero PID means the task is dead and in zombie state; 480 * there is no need to report a PID in that case. 481 */ 482 if (pid) 483 /* 484 * This part is racy: the task can be killed and the PID 485 * can become zero right here, but that is OK; the next 486 * query won't return a PID. We don't promise real-time 487 * reflection of SW objects. 488 */ 489 err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); 490 } 491 492 return err ?
-EMSGSIZE : 0; 493 } 494 495 static int fill_res_qp_entry_query(struct sk_buff *msg, 496 struct rdma_restrack_entry *res, 497 struct ib_device *dev, 498 struct ib_qp *qp) 499 { 500 struct ib_qp_init_attr qp_init_attr; 501 struct ib_qp_attr qp_attr; 502 int ret; 503 504 ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr); 505 if (ret) 506 return ret; 507 508 if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { 509 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, 510 qp_attr.dest_qp_num)) 511 goto err; 512 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, 513 qp_attr.rq_psn)) 514 goto err; 515 } 516 517 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) 518 goto err; 519 520 if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC || 521 qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) { 522 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, 523 qp_attr.path_mig_state)) 524 goto err; 525 } 526 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) 527 goto err; 528 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) 529 goto err; 530 531 if (dev->ops.fill_res_qp_entry) 532 return dev->ops.fill_res_qp_entry(msg, qp); 533 return 0; 534 535 err: return -EMSGSIZE; 536 } 537 538 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, 539 struct rdma_restrack_entry *res, uint32_t port) 540 { 541 struct ib_qp *qp = container_of(res, struct ib_qp, res); 542 struct ib_device *dev = qp->device; 543 int ret; 544 545 if (port && port != qp->port) 546 return -EAGAIN; 547 548 /* In create_qp() port is not set yet */ 549 if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) 550 return -EMSGSIZE; 551 552 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); 553 if (ret) 554 return -EMSGSIZE; 555 556 if (!rdma_is_kernel_res(res) && 557 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) 558 return -EMSGSIZE; 559 560 ret = fill_res_name_pid(msg, res); 561 if (ret) 562 return -EMSGSIZE; 563 564 return fill_res_qp_entry_query(msg, res, dev, qp); 565 } 566 567 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 568 struct rdma_restrack_entry *res, uint32_t port) 569 { 570 struct ib_qp *qp = container_of(res, struct ib_qp, res); 571 struct ib_device *dev = qp->device; 572 573 if (port && port != qp->port) 574 return -EAGAIN; 575 if (!dev->ops.fill_res_qp_entry_raw) 576 return -EINVAL; 577 return dev->ops.fill_res_qp_entry_raw(msg, qp); 578 } 579 580 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, 581 struct rdma_restrack_entry *res, uint32_t port) 582 { 583 struct rdma_id_private *id_priv = 584 container_of(res, struct rdma_id_private, res); 585 struct ib_device *dev = id_priv->id.device; 586 struct rdma_cm_id *cm_id = &id_priv->id; 587 588 if (port && port != cm_id->port_num) 589 return -EAGAIN; 590 591 if (cm_id->port_num && 592 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) 593 goto err; 594 595 if (id_priv->qp_num) { 596 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) 597 goto err; 598 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) 599 goto err; 600 } 601 602 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) 603 goto err; 604 605 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) 606 goto err; 607 608 if (cm_id->route.addr.src_addr.ss_family && 609 nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, 610 sizeof(cm_id->route.addr.src_addr), 611 &cm_id->route.addr.src_addr)) 612 goto err; 613 
if (cm_id->route.addr.dst_addr.ss_family && 614 nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, 615 sizeof(cm_id->route.addr.dst_addr), 616 &cm_id->route.addr.dst_addr)) 617 goto err; 618 619 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) 620 goto err; 621 622 if (fill_res_name_pid(msg, res)) 623 goto err; 624 625 if (dev->ops.fill_res_cm_id_entry) 626 return dev->ops.fill_res_cm_id_entry(msg, cm_id); 627 return 0; 628 629 err: return -EMSGSIZE; 630 } 631 632 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, 633 struct rdma_restrack_entry *res, uint32_t port) 634 { 635 struct ib_cq *cq = container_of(res, struct ib_cq, res); 636 struct ib_device *dev = cq->device; 637 638 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) 639 return -EMSGSIZE; 640 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 641 atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) 642 return -EMSGSIZE; 643 644 /* Poll context is only valid for kernel CQs */ 645 if (rdma_is_kernel_res(res) && 646 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) 647 return -EMSGSIZE; 648 649 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) 650 return -EMSGSIZE; 651 652 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) 653 return -EMSGSIZE; 654 if (!rdma_is_kernel_res(res) && 655 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 656 cq->uobject->uevent.uobject.context->res.id)) 657 return -EMSGSIZE; 658 659 if (fill_res_name_pid(msg, res)) 660 return -EMSGSIZE; 661 662 return (dev->ops.fill_res_cq_entry) ? 663 dev->ops.fill_res_cq_entry(msg, cq) : 0; 664 } 665 666 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 667 struct rdma_restrack_entry *res, uint32_t port) 668 { 669 struct ib_cq *cq = container_of(res, struct ib_cq, res); 670 struct ib_device *dev = cq->device; 671 672 if (!dev->ops.fill_res_cq_entry_raw) 673 return -EINVAL; 674 return dev->ops.fill_res_cq_entry_raw(msg, cq); 675 } 676 677 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 678 struct rdma_restrack_entry *res, uint32_t port) 679 { 680 struct ib_mr *mr = container_of(res, struct ib_mr, res); 681 struct ib_device *dev = mr->pd->device; 682 683 if (has_cap_net_admin) { 684 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) 685 return -EMSGSIZE; 686 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) 687 return -EMSGSIZE; 688 } 689 690 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 691 RDMA_NLDEV_ATTR_PAD)) 692 return -EMSGSIZE; 693 694 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 695 return -EMSGSIZE; 696 697 if (!rdma_is_kernel_res(res) && 698 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) 699 return -EMSGSIZE; 700 701 if (fill_res_name_pid(msg, res)) 702 return -EMSGSIZE; 703 704 return (dev->ops.fill_res_mr_entry) ? 
705 dev->ops.fill_res_mr_entry(msg, mr) : 706 0; 707 } 708 709 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 710 struct rdma_restrack_entry *res, uint32_t port) 711 { 712 struct ib_mr *mr = container_of(res, struct ib_mr, res); 713 struct ib_device *dev = mr->pd->device; 714 715 if (!dev->ops.fill_res_mr_entry_raw) 716 return -EINVAL; 717 return dev->ops.fill_res_mr_entry_raw(msg, mr); 718 } 719 720 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, 721 struct rdma_restrack_entry *res, uint32_t port) 722 { 723 struct ib_pd *pd = container_of(res, struct ib_pd, res); 724 725 if (has_cap_net_admin) { 726 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, 727 pd->local_dma_lkey)) 728 goto err; 729 if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 730 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, 731 pd->unsafe_global_rkey)) 732 goto err; 733 } 734 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 735 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 736 goto err; 737 738 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) 739 goto err; 740 741 if (!rdma_is_kernel_res(res) && 742 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, 743 pd->uobject->context->res.id)) 744 goto err; 745 746 return fill_res_name_pid(msg, res); 747 748 err: return -EMSGSIZE; 749 } 750 751 static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, 752 struct rdma_restrack_entry *res, uint32_t port) 753 { 754 struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res); 755 756 if (rdma_is_kernel_res(res)) 757 return 0; 758 759 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) 760 return -EMSGSIZE; 761 762 return fill_res_name_pid(msg, res); 763 } 764 765 static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, 766 uint32_t max_range) 767 { 768 struct nlattr *entry_attr; 769 770 if (!min_range) 771 return 0; 772 773 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); 774 if (!entry_attr) 775 return -EMSGSIZE; 776 777 if (min_range == max_range) { 778 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) 779 goto err; 780 } else { 781 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) 782 goto err; 783 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) 784 goto err; 785 } 786 nla_nest_end(msg, entry_attr); 787 return 0; 788 789 err: 790 nla_nest_cancel(msg, entry_attr); 791 return -EMSGSIZE; 792 } 793 794 static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) 795 { 796 uint32_t min_range = 0, prev = 0; 797 struct rdma_restrack_entry *res; 798 struct rdma_restrack_root *rt; 799 struct nlattr *table_attr; 800 struct ib_qp *qp = NULL; 801 unsigned long id = 0; 802 803 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); 804 if (!table_attr) 805 return -EMSGSIZE; 806 807 rt = &srq->device->res[RDMA_RESTRACK_QP]; 808 xa_lock(&rt->xa); 809 xa_for_each(&rt->xa, id, res) { 810 if (!rdma_restrack_get(res)) 811 continue; 812 813 qp = container_of(res, struct ib_qp, res); 814 if (!qp->srq || (qp->srq->res.id != srq->res.id)) { 815 rdma_restrack_put(res); 816 continue; 817 } 818 819 if (qp->qp_num < prev) 820 /* qp_num should be ascending */ 821 goto err_loop; 822 823 if (min_range == 0) { 824 min_range = qp->qp_num; 825 } else if (qp->qp_num > (prev + 1)) { 826 if (fill_res_range_qp_entry(msg, min_range, prev)) 827 goto err_loop; 828 829 min_range = qp->qp_num; 830 } 831 prev = qp->qp_num; 832 rdma_restrack_put(res); 833 } 834 835 xa_unlock(&rt->xa); 836 837 
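	/*
	 * Flush the last range accumulated by the loop above; this is a
	 * no-op when no matching QPs were found (min_range == 0).
	 */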
if (fill_res_range_qp_entry(msg, min_range, prev)) 838 goto err; 839 840 nla_nest_end(msg, table_attr); 841 return 0; 842 843 err_loop: 844 rdma_restrack_put(res); 845 xa_unlock(&rt->xa); 846 err: 847 nla_nest_cancel(msg, table_attr); 848 return -EMSGSIZE; 849 } 850 851 static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, 852 struct rdma_restrack_entry *res, uint32_t port) 853 { 854 struct ib_srq *srq = container_of(res, struct ib_srq, res); 855 struct ib_device *dev = srq->device; 856 857 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) 858 goto err; 859 860 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) 861 goto err; 862 863 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) 864 goto err; 865 866 if (ib_srq_has_cq(srq->srq_type)) { 867 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, 868 srq->ext.cq->res.id)) 869 goto err; 870 } 871 872 if (fill_res_srq_qps(msg, srq)) 873 goto err; 874 875 if (fill_res_name_pid(msg, res)) 876 goto err; 877 878 if (dev->ops.fill_res_srq_entry) 879 return dev->ops.fill_res_srq_entry(msg, srq); 880 881 return 0; 882 883 err: 884 return -EMSGSIZE; 885 } 886 887 static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, 888 struct rdma_restrack_entry *res, uint32_t port) 889 { 890 struct ib_srq *srq = container_of(res, struct ib_srq, res); 891 struct ib_device *dev = srq->device; 892 893 if (!dev->ops.fill_res_srq_entry_raw) 894 return -EINVAL; 895 return dev->ops.fill_res_srq_entry_raw(msg, srq); 896 } 897 898 static int fill_stat_counter_mode(struct sk_buff *msg, 899 struct rdma_counter *counter) 900 { 901 struct rdma_counter_mode *m = &counter->mode; 902 903 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) 904 return -EMSGSIZE; 905 906 if (m->mode == RDMA_COUNTER_MODE_AUTO) { 907 if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && 908 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) 909 return -EMSGSIZE; 910 911 if ((m->mask & RDMA_COUNTER_MASK_PID) && 912 fill_res_name_pid(msg, &counter->res)) 913 return -EMSGSIZE; 914 } 915 916 return 0; 917 } 918 919 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) 920 { 921 struct nlattr *entry_attr; 922 923 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); 924 if (!entry_attr) 925 return -EMSGSIZE; 926 927 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) 928 goto err; 929 930 nla_nest_end(msg, entry_attr); 931 return 0; 932 933 err: 934 nla_nest_cancel(msg, entry_attr); 935 return -EMSGSIZE; 936 } 937 938 static int fill_stat_counter_qps(struct sk_buff *msg, 939 struct rdma_counter *counter) 940 { 941 struct rdma_restrack_entry *res; 942 struct rdma_restrack_root *rt; 943 struct nlattr *table_attr; 944 struct ib_qp *qp = NULL; 945 unsigned long id = 0; 946 int ret = 0; 947 948 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); 949 if (!table_attr) 950 return -EMSGSIZE; 951 952 rt = &counter->device->res[RDMA_RESTRACK_QP]; 953 xa_lock(&rt->xa); 954 xa_for_each(&rt->xa, id, res) { 955 qp = container_of(res, struct ib_qp, res); 956 if (!qp->counter || (qp->counter->id != counter->id)) 957 continue; 958 959 ret = fill_stat_counter_qp_entry(msg, qp->qp_num); 960 if (ret) 961 goto err; 962 } 963 964 xa_unlock(&rt->xa); 965 nla_nest_end(msg, table_attr); 966 return 0; 967 968 err: 969 xa_unlock(&rt->xa); 970 nla_nest_cancel(msg, table_attr); 971 return ret; 972 } 973 974 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, 975 u64 value) 976 { 977 struct nlattr 
*entry_attr; 978 979 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); 980 if (!entry_attr) 981 return -EMSGSIZE; 982 983 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, 984 name)) 985 goto err; 986 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, 987 value, RDMA_NLDEV_ATTR_PAD)) 988 goto err; 989 990 nla_nest_end(msg, entry_attr); 991 return 0; 992 993 err: 994 nla_nest_cancel(msg, entry_attr); 995 return -EMSGSIZE; 996 } 997 EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry); 998 999 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, 1000 struct rdma_restrack_entry *res, uint32_t port) 1001 { 1002 struct ib_mr *mr = container_of(res, struct ib_mr, res); 1003 struct ib_device *dev = mr->pd->device; 1004 1005 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) 1006 goto err; 1007 1008 if (dev->ops.fill_stat_mr_entry) 1009 return dev->ops.fill_stat_mr_entry(msg, mr); 1010 return 0; 1011 1012 err: 1013 return -EMSGSIZE; 1014 } 1015 1016 static int fill_stat_counter_hwcounters(struct sk_buff *msg, 1017 struct rdma_counter *counter) 1018 { 1019 struct rdma_hw_stats *st = counter->stats; 1020 struct nlattr *table_attr; 1021 int i; 1022 1023 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 1024 if (!table_attr) 1025 return -EMSGSIZE; 1026 1027 mutex_lock(&st->lock); 1028 for (i = 0; i < st->num_counters; i++) { 1029 if (test_bit(i, st->is_disabled)) 1030 continue; 1031 if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, 1032 st->value[i])) 1033 goto err; 1034 } 1035 mutex_unlock(&st->lock); 1036 1037 nla_nest_end(msg, table_attr); 1038 return 0; 1039 1040 err: 1041 mutex_unlock(&st->lock); 1042 nla_nest_cancel(msg, table_attr); 1043 return -EMSGSIZE; 1044 } 1045 1046 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, 1047 struct rdma_restrack_entry *res, 1048 uint32_t port) 1049 { 1050 struct rdma_counter *counter = 1051 container_of(res, struct rdma_counter, res); 1052 1053 if (port && port != counter->port) 1054 return -EAGAIN; 1055 1056 /* Dump it even query failed */ 1057 rdma_counter_query_stats(counter); 1058 1059 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || 1060 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || 1061 fill_stat_counter_mode(msg, counter) || 1062 fill_stat_counter_qps(msg, counter) || 1063 fill_stat_counter_hwcounters(msg, counter)) 1064 return -EMSGSIZE; 1065 1066 return 0; 1067 } 1068 1069 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1070 struct netlink_ext_ack *extack) 1071 { 1072 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1073 struct ib_device *device; 1074 struct sk_buff *msg; 1075 u32 index; 1076 int err; 1077 1078 err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1079 nldev_policy, NL_VALIDATE_LIBERAL, extack); 1080 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1081 return -EINVAL; 1082 1083 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1084 1085 device = ib_device_get_by_index(sock_net(skb->sk), index); 1086 if (!device) 1087 return -EINVAL; 1088 1089 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1090 if (!msg) { 1091 err = -ENOMEM; 1092 goto err; 1093 } 1094 1095 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1096 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1097 0, 0); 1098 if (!nlh) { 1099 err = -EMSGSIZE; 1100 goto err_free; 1101 } 1102 1103 err = fill_dev_info(msg, device); 1104 if (err) 1105 goto err_free; 1106 1107 nlmsg_end(msg, nlh); 
1108 1109 ib_device_put(device); 1110 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1111 1112 err_free: 1113 nlmsg_free(msg); 1114 err: 1115 ib_device_put(device); 1116 return err; 1117 } 1118 1119 static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1120 struct netlink_ext_ack *extack) 1121 { 1122 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1123 struct ib_device *device; 1124 u32 index; 1125 int err; 1126 1127 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1128 nldev_policy, extack); 1129 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1130 return -EINVAL; 1131 1132 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1133 device = ib_device_get_by_index(sock_net(skb->sk), index); 1134 if (!device) 1135 return -EINVAL; 1136 1137 if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { 1138 char name[IB_DEVICE_NAME_MAX] = {}; 1139 1140 nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 1141 IB_DEVICE_NAME_MAX); 1142 if (strlen(name) == 0) { 1143 err = -EINVAL; 1144 goto done; 1145 } 1146 err = ib_device_rename(device, name); 1147 goto done; 1148 } 1149 1150 if (tb[RDMA_NLDEV_NET_NS_FD]) { 1151 u32 ns_fd; 1152 1153 ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); 1154 err = ib_device_set_netns_put(skb, device, ns_fd); 1155 goto put_done; 1156 } 1157 1158 if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) { 1159 u8 use_dim; 1160 1161 use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]); 1162 err = ib_device_set_dim(device, use_dim); 1163 goto done; 1164 } 1165 1166 done: 1167 ib_device_put(device); 1168 put_done: 1169 return err; 1170 } 1171 1172 static int _nldev_get_dumpit(struct ib_device *device, 1173 struct sk_buff *skb, 1174 struct netlink_callback *cb, 1175 unsigned int idx) 1176 { 1177 int start = cb->args[0]; 1178 struct nlmsghdr *nlh; 1179 1180 if (idx < start) 1181 return 0; 1182 1183 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1184 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1185 0, NLM_F_MULTI); 1186 1187 if (!nlh || fill_dev_info(skb, device)) { 1188 nlmsg_cancel(skb, nlh); 1189 goto out; 1190 } 1191 1192 nlmsg_end(skb, nlh); 1193 1194 idx++; 1195 1196 out: cb->args[0] = idx; 1197 return skb->len; 1198 } 1199 1200 static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 1201 { 1202 /* 1203 * There is no need to take lock, because 1204 * we are relying on ib_core's locking. 
1205 */ 1206 return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); 1207 } 1208 1209 static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1210 struct netlink_ext_ack *extack) 1211 { 1212 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1213 struct ib_device *device; 1214 struct sk_buff *msg; 1215 u32 index; 1216 u32 port; 1217 int err; 1218 1219 err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1220 nldev_policy, NL_VALIDATE_LIBERAL, extack); 1221 if (err || 1222 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 1223 !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 1224 return -EINVAL; 1225 1226 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1227 device = ib_device_get_by_index(sock_net(skb->sk), index); 1228 if (!device) 1229 return -EINVAL; 1230 1231 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1232 if (!rdma_is_port_valid(device, port)) { 1233 err = -EINVAL; 1234 goto err; 1235 } 1236 1237 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1238 if (!msg) { 1239 err = -ENOMEM; 1240 goto err; 1241 } 1242 1243 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1244 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 1245 0, 0); 1246 if (!nlh) { 1247 err = -EMSGSIZE; 1248 goto err_free; 1249 } 1250 1251 err = fill_port_info(msg, device, port, sock_net(skb->sk)); 1252 if (err) 1253 goto err_free; 1254 1255 nlmsg_end(msg, nlh); 1256 ib_device_put(device); 1257 1258 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1259 1260 err_free: 1261 nlmsg_free(msg); 1262 err: 1263 ib_device_put(device); 1264 return err; 1265 } 1266 1267 static int nldev_port_get_dumpit(struct sk_buff *skb, 1268 struct netlink_callback *cb) 1269 { 1270 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1271 struct ib_device *device; 1272 int start = cb->args[0]; 1273 struct nlmsghdr *nlh; 1274 u32 idx = 0; 1275 u32 ifindex; 1276 int err; 1277 unsigned int p; 1278 1279 err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1280 nldev_policy, NL_VALIDATE_LIBERAL, NULL); 1281 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1282 return -EINVAL; 1283 1284 ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1285 device = ib_device_get_by_index(sock_net(skb->sk), ifindex); 1286 if (!device) 1287 return -EINVAL; 1288 1289 rdma_for_each_port (device, p) { 1290 /* 1291 * The dumpit function returns all information from specific 1292 * index. This specific index is taken from the netlink 1293 * messages request sent by user and it is available 1294 * in cb->args[0]. 1295 * 1296 * Usually, the user doesn't fill this field and it causes 1297 * to return everything. 
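 * For example, if a previous pass of this dumpit filled the skb after
 * emitting three ports, cb->args[0] holds 3 and the next pass skips
 * those first three ports before continuing with the fourth.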
1298 * 1299 */ 1300 if (idx < start) { 1301 idx++; 1302 continue; 1303 } 1304 1305 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, 1306 cb->nlh->nlmsg_seq, 1307 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1308 RDMA_NLDEV_CMD_PORT_GET), 1309 0, NLM_F_MULTI); 1310 1311 if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) { 1312 nlmsg_cancel(skb, nlh); 1313 goto out; 1314 } 1315 idx++; 1316 nlmsg_end(skb, nlh); 1317 } 1318 1319 out: 1320 ib_device_put(device); 1321 cb->args[0] = idx; 1322 return skb->len; 1323 } 1324 1325 static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1326 struct netlink_ext_ack *extack) 1327 { 1328 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1329 bool show_details = false; 1330 struct ib_device *device; 1331 struct sk_buff *msg; 1332 u32 index; 1333 int ret; 1334 1335 ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1336 nldev_policy, NL_VALIDATE_LIBERAL, extack); 1337 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1338 return -EINVAL; 1339 1340 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1341 device = ib_device_get_by_index(sock_net(skb->sk), index); 1342 if (!device) 1343 return -EINVAL; 1344 1345 if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]) 1346 show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]); 1347 1348 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1349 if (!msg) { 1350 ret = -ENOMEM; 1351 goto err; 1352 } 1353 1354 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1355 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 1356 0, 0); 1357 if (!nlh) { 1358 ret = -EMSGSIZE; 1359 goto err_free; 1360 } 1361 1362 ret = fill_res_info(msg, device, show_details); 1363 if (ret) 1364 goto err_free; 1365 1366 nlmsg_end(msg, nlh); 1367 ib_device_put(device); 1368 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1369 1370 err_free: 1371 nlmsg_free(msg); 1372 err: 1373 ib_device_put(device); 1374 return ret; 1375 } 1376 1377 static int _nldev_res_get_dumpit(struct ib_device *device, 1378 struct sk_buff *skb, 1379 struct netlink_callback *cb, 1380 unsigned int idx) 1381 { 1382 int start = cb->args[0]; 1383 struct nlmsghdr *nlh; 1384 1385 if (idx < start) 1386 return 0; 1387 1388 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1389 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 1390 0, NLM_F_MULTI); 1391 1392 if (!nlh || fill_res_info(skb, device, false)) { 1393 nlmsg_cancel(skb, nlh); 1394 goto out; 1395 } 1396 nlmsg_end(skb, nlh); 1397 1398 idx++; 1399 1400 out: 1401 cb->args[0] = idx; 1402 return skb->len; 1403 } 1404 1405 static int nldev_res_get_dumpit(struct sk_buff *skb, 1406 struct netlink_callback *cb) 1407 { 1408 return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); 1409 } 1410 1411 struct nldev_fill_res_entry { 1412 enum rdma_nldev_attr nldev_attr; 1413 u8 flags; 1414 u32 entry; 1415 u32 id; 1416 }; 1417 1418 enum nldev_res_flags { 1419 NLDEV_PER_DEV = 1 << 0, 1420 }; 1421 1422 static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { 1423 [RDMA_RESTRACK_QP] = { 1424 .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, 1425 .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, 1426 .id = RDMA_NLDEV_ATTR_RES_LQPN, 1427 }, 1428 [RDMA_RESTRACK_CM_ID] = { 1429 .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, 1430 .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, 1431 .id = RDMA_NLDEV_ATTR_RES_CM_IDN, 1432 }, 1433 [RDMA_RESTRACK_CQ] = { 1434 .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, 1435 .flags = NLDEV_PER_DEV, 1436 .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, 1437 .id = RDMA_NLDEV_ATTR_RES_CQN, 1438 }, 
1439 [RDMA_RESTRACK_MR] = { 1440 .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, 1441 .flags = NLDEV_PER_DEV, 1442 .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, 1443 .id = RDMA_NLDEV_ATTR_RES_MRN, 1444 }, 1445 [RDMA_RESTRACK_PD] = { 1446 .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, 1447 .flags = NLDEV_PER_DEV, 1448 .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, 1449 .id = RDMA_NLDEV_ATTR_RES_PDN, 1450 }, 1451 [RDMA_RESTRACK_COUNTER] = { 1452 .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, 1453 .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, 1454 .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, 1455 }, 1456 [RDMA_RESTRACK_CTX] = { 1457 .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX, 1458 .flags = NLDEV_PER_DEV, 1459 .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY, 1460 .id = RDMA_NLDEV_ATTR_RES_CTXN, 1461 }, 1462 [RDMA_RESTRACK_SRQ] = { 1463 .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ, 1464 .flags = NLDEV_PER_DEV, 1465 .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, 1466 .id = RDMA_NLDEV_ATTR_RES_SRQN, 1467 }, 1468 1469 }; 1470 1471 static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1472 struct netlink_ext_ack *extack, 1473 enum rdma_restrack_type res_type, 1474 res_fill_func_t fill_func) 1475 { 1476 const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1477 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1478 struct rdma_restrack_entry *res; 1479 struct ib_device *device; 1480 u32 index, id, port = 0; 1481 bool has_cap_net_admin; 1482 struct sk_buff *msg; 1483 int ret; 1484 1485 ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1486 nldev_policy, NL_VALIDATE_LIBERAL, extack); 1487 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) 1488 return -EINVAL; 1489 1490 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1491 device = ib_device_get_by_index(sock_net(skb->sk), index); 1492 if (!device) 1493 return -EINVAL; 1494 1495 if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1496 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1497 if (!rdma_is_port_valid(device, port)) { 1498 ret = -EINVAL; 1499 goto err; 1500 } 1501 } 1502 1503 if ((port && fe->flags & NLDEV_PER_DEV) || 1504 (!port && ~fe->flags & NLDEV_PER_DEV)) { 1505 ret = -EINVAL; 1506 goto err; 1507 } 1508 1509 id = nla_get_u32(tb[fe->id]); 1510 res = rdma_restrack_get_byid(device, res_type, id); 1511 if (IS_ERR(res)) { 1512 ret = PTR_ERR(res); 1513 goto err; 1514 } 1515 1516 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1517 if (!msg) { 1518 ret = -ENOMEM; 1519 goto err_get; 1520 } 1521 1522 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1523 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1524 RDMA_NL_GET_OP(nlh->nlmsg_type)), 1525 0, 0); 1526 1527 if (!nlh || fill_nldev_handle(msg, device)) { 1528 ret = -EMSGSIZE; 1529 goto err_free; 1530 } 1531 1532 has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); 1533 1534 ret = fill_func(msg, has_cap_net_admin, res, port); 1535 if (ret) 1536 goto err_free; 1537 1538 rdma_restrack_put(res); 1539 nlmsg_end(msg, nlh); 1540 ib_device_put(device); 1541 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1542 1543 err_free: 1544 nlmsg_free(msg); 1545 err_get: 1546 rdma_restrack_put(res); 1547 err: 1548 ib_device_put(device); 1549 return ret; 1550 } 1551 1552 static int res_get_common_dumpit(struct sk_buff *skb, 1553 struct netlink_callback *cb, 1554 enum rdma_restrack_type res_type, 1555 res_fill_func_t fill_func) 1556 { 1557 const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; 1558 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1559 struct rdma_restrack_entry *res; 1560 struct rdma_restrack_root *rt; 1561 
int err, ret = 0, idx = 0; 1562 bool show_details = false; 1563 struct nlattr *table_attr; 1564 struct nlattr *entry_attr; 1565 struct ib_device *device; 1566 int start = cb->args[0]; 1567 bool has_cap_net_admin; 1568 struct nlmsghdr *nlh; 1569 unsigned long id; 1570 u32 index, port = 0; 1571 bool filled = false; 1572 1573 err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1574 nldev_policy, NL_VALIDATE_LIBERAL, NULL); 1575 /* 1576 * Right now, we are expecting the device index to get res information, 1577 * but it is possible to extend this code to return all devices in 1578 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. 1579 * if it doesn't exist, we will iterate over all devices. 1580 * 1581 * But it is not needed for now. 1582 */ 1583 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1584 return -EINVAL; 1585 1586 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1587 device = ib_device_get_by_index(sock_net(skb->sk), index); 1588 if (!device) 1589 return -EINVAL; 1590 1591 if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]) 1592 show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]); 1593 1594 /* 1595 * If no PORT_INDEX is supplied, we will return all QPs from that device 1596 */ 1597 if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1598 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1599 if (!rdma_is_port_valid(device, port)) { 1600 ret = -EINVAL; 1601 goto err_index; 1602 } 1603 } 1604 1605 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1606 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1607 RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 1608 0, NLM_F_MULTI); 1609 1610 if (!nlh || fill_nldev_handle(skb, device)) { 1611 ret = -EMSGSIZE; 1612 goto err; 1613 } 1614 1615 table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); 1616 if (!table_attr) { 1617 ret = -EMSGSIZE; 1618 goto err; 1619 } 1620 1621 has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); 1622 1623 rt = &device->res[res_type]; 1624 xa_lock(&rt->xa); 1625 /* 1626 * FIXME: if the skip ahead is something common this loop should 1627 * use xas_for_each & xas_pause to optimize, we can have a lot of 1628 * objects. 1629 */ 1630 xa_for_each(&rt->xa, id, res) { 1631 if (xa_get_mark(&rt->xa, res->id, RESTRACK_DD) && !show_details) 1632 goto next; 1633 1634 if (idx < start || !rdma_restrack_get(res)) 1635 goto next; 1636 1637 xa_unlock(&rt->xa); 1638 1639 filled = true; 1640 1641 entry_attr = nla_nest_start_noflag(skb, fe->entry); 1642 if (!entry_attr) { 1643 ret = -EMSGSIZE; 1644 rdma_restrack_put(res); 1645 goto msg_full; 1646 } 1647 1648 ret = fill_func(skb, has_cap_net_admin, res, port); 1649 1650 rdma_restrack_put(res); 1651 1652 if (ret) { 1653 nla_nest_cancel(skb, entry_attr); 1654 if (ret == -EMSGSIZE) 1655 goto msg_full; 1656 if (ret == -EAGAIN) 1657 goto again; 1658 goto res_err; 1659 } 1660 nla_nest_end(skb, entry_attr); 1661 again: xa_lock(&rt->xa); 1662 next: idx++; 1663 } 1664 xa_unlock(&rt->xa); 1665 1666 msg_full: 1667 nla_nest_end(skb, table_attr); 1668 nlmsg_end(skb, nlh); 1669 cb->args[0] = idx; 1670 1671 /* 1672 * No more entries to fill, cancel the message and 1673 * return 0 to mark end of dumpit. 
1674 */ 1675 if (!filled) 1676 goto err; 1677 1678 ib_device_put(device); 1679 return skb->len; 1680 1681 res_err: 1682 nla_nest_cancel(skb, table_attr); 1683 1684 err: 1685 nlmsg_cancel(skb, nlh); 1686 1687 err_index: 1688 ib_device_put(device); 1689 return ret; 1690 } 1691 1692 #define RES_GET_FUNCS(name, type) \ 1693 static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ 1694 struct netlink_callback *cb) \ 1695 { \ 1696 return res_get_common_dumpit(skb, cb, type, \ 1697 fill_res_##name##_entry); \ 1698 } \ 1699 static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ 1700 struct nlmsghdr *nlh, \ 1701 struct netlink_ext_ack *extack) \ 1702 { \ 1703 return res_get_common_doit(skb, nlh, extack, type, \ 1704 fill_res_##name##_entry); \ 1705 } 1706 1707 RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); 1708 RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); 1709 RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); 1710 RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); 1711 RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); 1712 RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); 1713 RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); 1714 RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); 1715 RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); 1716 RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX); 1717 RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ); 1718 RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ); 1719 1720 static LIST_HEAD(link_ops); 1721 static DECLARE_RWSEM(link_ops_rwsem); 1722 1723 static const struct rdma_link_ops *link_ops_get(const char *type) 1724 { 1725 const struct rdma_link_ops *ops; 1726 1727 list_for_each_entry(ops, &link_ops, list) { 1728 if (!strcmp(ops->type, type)) 1729 goto out; 1730 } 1731 ops = NULL; 1732 out: 1733 return ops; 1734 } 1735 1736 void rdma_link_register(struct rdma_link_ops *ops) 1737 { 1738 down_write(&link_ops_rwsem); 1739 if (WARN_ON_ONCE(link_ops_get(ops->type))) 1740 goto out; 1741 list_add(&ops->list, &link_ops); 1742 out: 1743 up_write(&link_ops_rwsem); 1744 } 1745 EXPORT_SYMBOL(rdma_link_register); 1746 1747 void rdma_link_unregister(struct rdma_link_ops *ops) 1748 { 1749 down_write(&link_ops_rwsem); 1750 list_del(&ops->list); 1751 up_write(&link_ops_rwsem); 1752 } 1753 EXPORT_SYMBOL(rdma_link_unregister); 1754 1755 static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, 1756 struct netlink_ext_ack *extack) 1757 { 1758 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1759 char ibdev_name[IB_DEVICE_NAME_MAX]; 1760 const struct rdma_link_ops *ops; 1761 char ndev_name[IFNAMSIZ]; 1762 struct net_device *ndev; 1763 char type[IFNAMSIZ]; 1764 int err; 1765 1766 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1767 nldev_policy, extack); 1768 if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || 1769 !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) 1770 return -EINVAL; 1771 1772 nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], 1773 sizeof(ibdev_name)); 1774 if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) 1775 return -EINVAL; 1776 1777 nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); 1778 nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME], 1779 sizeof(ndev_name)); 1780 1781 ndev = dev_get_by_name(sock_net(skb->sk), ndev_name); 1782 if (!ndev) 1783 return -ENODEV; 1784 1785 down_read(&link_ops_rwsem); 1786 ops = link_ops_get(type); 1787 #ifdef CONFIG_MODULES 1788 if (!ops) { 1789 up_read(&link_ops_rwsem); 1790 request_module("rdma-link-%s", type); 1791 down_read(&link_ops_rwsem); 1792 ops = link_ops_get(type); 1793 } 1794 #endif 1795 err = ops ? 
ops->newlink(ibdev_name, ndev) : -EINVAL; 1796 up_read(&link_ops_rwsem); 1797 dev_put(ndev); 1798 1799 return err; 1800 } 1801 1802 static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 1803 struct netlink_ext_ack *extack) 1804 { 1805 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1806 struct ib_device *device; 1807 u32 index; 1808 int err; 1809 1810 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 1811 nldev_policy, extack); 1812 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 1813 return -EINVAL; 1814 1815 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1816 device = ib_device_get_by_index(sock_net(skb->sk), index); 1817 if (!device) 1818 return -EINVAL; 1819 1820 if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) { 1821 ib_device_put(device); 1822 return -EINVAL; 1823 } 1824 1825 ib_unregister_device_and_put(device); 1826 return 0; 1827 } 1828 1829 static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, 1830 struct netlink_ext_ack *extack) 1831 { 1832 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1833 char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE]; 1834 struct ib_client_nl_info data = {}; 1835 struct ib_device *ibdev = NULL; 1836 struct sk_buff *msg; 1837 u32 index; 1838 int err; 1839 1840 err = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, 1841 NL_VALIDATE_LIBERAL, extack); 1842 if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) 1843 return -EINVAL; 1844 1845 nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE], 1846 sizeof(client_name)); 1847 1848 if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) { 1849 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 1850 ibdev = ib_device_get_by_index(sock_net(skb->sk), index); 1851 if (!ibdev) 1852 return -EINVAL; 1853 1854 if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1855 data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 1856 if (!rdma_is_port_valid(ibdev, data.port)) { 1857 err = -EINVAL; 1858 goto out_put; 1859 } 1860 } else { 1861 data.port = -1; 1862 } 1863 } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { 1864 return -EINVAL; 1865 } 1866 1867 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1868 if (!msg) { 1869 err = -ENOMEM; 1870 goto out_put; 1871 } 1872 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1873 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1874 RDMA_NLDEV_CMD_GET_CHARDEV), 1875 0, 0); 1876 if (!nlh) { 1877 err = -EMSGSIZE; 1878 goto out_nlmsg; 1879 } 1880 1881 data.nl_msg = msg; 1882 err = ib_get_client_nl_info(ibdev, client_name, &data); 1883 if (err) 1884 goto out_nlmsg; 1885 1886 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, 1887 huge_encode_dev(data.cdev->devt), 1888 RDMA_NLDEV_ATTR_PAD); 1889 if (err) 1890 goto out_data; 1891 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, 1892 RDMA_NLDEV_ATTR_PAD); 1893 if (err) 1894 goto out_data; 1895 if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, 1896 dev_name(data.cdev))) { 1897 err = -EMSGSIZE; 1898 goto out_data; 1899 } 1900 1901 nlmsg_end(msg, nlh); 1902 put_device(data.cdev); 1903 if (ibdev) 1904 ib_device_put(ibdev); 1905 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1906 1907 out_data: 1908 put_device(data.cdev); 1909 out_nlmsg: 1910 nlmsg_free(msg); 1911 out_put: 1912 if (ibdev) 1913 ib_device_put(ibdev); 1914 return err; 1915 } 1916 1917 static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1918 struct netlink_ext_ack *extack) 1919 { 1920 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 1921 struct sk_buff *msg; 1922 int err; 1923 1924 err = __nlmsg_parse(nlh, 0, tb, 
RDMA_NLDEV_ATTR_MAX - 1, 1925 nldev_policy, NL_VALIDATE_LIBERAL, extack); 1926 if (err) 1927 return err; 1928 1929 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1930 if (!msg) 1931 return -ENOMEM; 1932 1933 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 1934 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 1935 RDMA_NLDEV_CMD_SYS_GET), 1936 0, 0); 1937 if (!nlh) { 1938 nlmsg_free(msg); 1939 return -EMSGSIZE; 1940 } 1941 1942 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, 1943 (u8)ib_devices_shared_netns); 1944 if (err) { 1945 nlmsg_free(msg); 1946 return err; 1947 } 1948 1949 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, 1950 (u8)privileged_qkey); 1951 if (err) { 1952 nlmsg_free(msg); 1953 return err; 1954 } 1955 1956 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1); 1957 if (err) { 1958 nlmsg_free(msg); 1959 return err; 1960 } 1961 /* 1962 * Copy-on-fork is supported. 1963 * See commits: 1964 * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes") 1965 * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") 1966 * for more details. Don't backport this without them. 1967 * 1968 * Return value ignored on purpose, assume copy-on-fork is not 1969 * supported in case of failure. 1970 */ 1971 nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); 1972 1973 nlmsg_end(msg, nlh); 1974 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 1975 } 1976 1977 static int nldev_set_sys_set_netns_doit(struct nlattr *tb[]) 1978 { 1979 u8 enable; 1980 int err; 1981 1982 enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]); 1983 /* Only 0 and 1 are supported */ 1984 if (enable > 1) 1985 return -EINVAL; 1986 1987 err = rdma_compatdev_set(enable); 1988 return err; 1989 } 1990 1991 static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[]) 1992 { 1993 u8 enable; 1994 1995 enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]); 1996 /* Only 0 and 1 are supported */ 1997 if (enable > 1) 1998 return -EINVAL; 1999 2000 privileged_qkey = enable; 2001 return 0; 2002 } 2003 2004 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 2005 struct netlink_ext_ack *extack) 2006 { 2007 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2008 int err; 2009 2010 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2011 nldev_policy, extack); 2012 if (err) 2013 return -EINVAL; 2014 2015 if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]) 2016 return nldev_set_sys_set_netns_doit(tb); 2017 2018 if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]) 2019 return nldev_set_sys_set_pqkey_doit(tb); 2020 2021 return -EINVAL; 2022 } 2023 2024 2025 static int nldev_stat_set_mode_doit(struct sk_buff *msg, 2026 struct netlink_ext_ack *extack, 2027 struct nlattr *tb[], 2028 struct ib_device *device, u32 port) 2029 { 2030 u32 mode, mask = 0, qpn, cntn = 0; 2031 int ret; 2032 2033 /* Currently only counter for QP is supported */ 2034 if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || 2035 nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) 2036 return -EINVAL; 2037 2038 mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); 2039 if (mode == RDMA_COUNTER_MODE_AUTO) { 2040 if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) 2041 mask = nla_get_u32( 2042 tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); 2043 return rdma_counter_set_auto_mode(device, port, mask, extack); 2044 } 2045 2046 if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) 2047 return -EINVAL; 2048 2049 qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); 2050 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { 2051 cntn = 
nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); 2052 ret = rdma_counter_bind_qpn(device, port, qpn, cntn); 2053 if (ret) 2054 return ret; 2055 } else { 2056 ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn); 2057 if (ret) 2058 return ret; 2059 } 2060 2061 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || 2062 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { 2063 ret = -EMSGSIZE; 2064 goto err_fill; 2065 } 2066 2067 return 0; 2068 2069 err_fill: 2070 rdma_counter_unbind_qpn(device, port, qpn, cntn); 2071 return ret; 2072 } 2073 2074 static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[], 2075 struct ib_device *device, 2076 u32 port) 2077 { 2078 struct rdma_hw_stats *stats; 2079 struct nlattr *entry_attr; 2080 unsigned long *target; 2081 int rem, i, ret = 0; 2082 u32 index; 2083 2084 stats = ib_get_hw_stats_port(device, port); 2085 if (!stats) 2086 return -EINVAL; 2087 2088 target = kcalloc(BITS_TO_LONGS(stats->num_counters), 2089 sizeof(*stats->is_disabled), GFP_KERNEL); 2090 if (!target) 2091 return -ENOMEM; 2092 2093 nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS], 2094 rem) { 2095 index = nla_get_u32(entry_attr); 2096 if ((index >= stats->num_counters) || 2097 !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) { 2098 ret = -EINVAL; 2099 goto out; 2100 } 2101 2102 set_bit(index, target); 2103 } 2104 2105 for (i = 0; i < stats->num_counters; i++) { 2106 if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL)) 2107 continue; 2108 2109 ret = rdma_counter_modify(device, port, i, test_bit(i, target)); 2110 if (ret) 2111 goto out; 2112 } 2113 2114 out: 2115 kfree(target); 2116 return ret; 2117 } 2118 2119 static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 2120 struct netlink_ext_ack *extack) 2121 { 2122 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2123 struct ib_device *device; 2124 struct sk_buff *msg; 2125 u32 index, port; 2126 int ret; 2127 2128 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, 2129 extack); 2130 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 2131 !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 2132 return -EINVAL; 2133 2134 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2135 device = ib_device_get_by_index(sock_net(skb->sk), index); 2136 if (!device) 2137 return -EINVAL; 2138 2139 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 2140 if (!rdma_is_port_valid(device, port)) { 2141 ret = -EINVAL; 2142 goto err_put_device; 2143 } 2144 2145 if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] && 2146 !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { 2147 ret = -EINVAL; 2148 goto err_put_device; 2149 } 2150 2151 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2152 if (!msg) { 2153 ret = -ENOMEM; 2154 goto err_put_device; 2155 } 2156 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 2157 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 2158 RDMA_NLDEV_CMD_STAT_SET), 2159 0, 0); 2160 if (!nlh || fill_nldev_handle(msg, device) || 2161 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { 2162 ret = -EMSGSIZE; 2163 goto err_free_msg; 2164 } 2165 2166 if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) { 2167 ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); 2168 if (ret) 2169 goto err_free_msg; 2170 } 2171 2172 if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { 2173 ret = nldev_stat_set_counter_dynamic_doit(tb, device, port); 2174 if (ret) 2175 goto err_free_msg; 2176 } 2177 2178 nlmsg_end(msg, nlh); 2179 ib_device_put(device); 2180 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 2181 2182 err_free_msg: 2183 
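	/*
	 * Unwind in reverse order of acquisition: drop the partially built
	 * reply first, then the device reference taken by
	 * ib_device_get_by_index().
	 */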
nlmsg_free(msg); 2184 err_put_device: 2185 ib_device_put(device); 2186 return ret; 2187 } 2188 2189 static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 2190 struct netlink_ext_ack *extack) 2191 { 2192 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2193 struct ib_device *device; 2194 struct sk_buff *msg; 2195 u32 index, port, qpn, cntn; 2196 int ret; 2197 2198 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2199 nldev_policy, extack); 2200 if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || 2201 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || 2202 !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] || 2203 !tb[RDMA_NLDEV_ATTR_RES_LQPN]) 2204 return -EINVAL; 2205 2206 if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) 2207 return -EINVAL; 2208 2209 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2210 device = ib_device_get_by_index(sock_net(skb->sk), index); 2211 if (!device) 2212 return -EINVAL; 2213 2214 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 2215 if (!rdma_is_port_valid(device, port)) { 2216 ret = -EINVAL; 2217 goto err; 2218 } 2219 2220 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2221 if (!msg) { 2222 ret = -ENOMEM; 2223 goto err; 2224 } 2225 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 2226 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 2227 RDMA_NLDEV_CMD_STAT_SET), 2228 0, 0); 2229 if (!nlh) { 2230 ret = -EMSGSIZE; 2231 goto err_fill; 2232 } 2233 2234 cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); 2235 qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); 2236 if (fill_nldev_handle(msg, device) || 2237 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 2238 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || 2239 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { 2240 ret = -EMSGSIZE; 2241 goto err_fill; 2242 } 2243 2244 ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); 2245 if (ret) 2246 goto err_fill; 2247 2248 nlmsg_end(msg, nlh); 2249 ib_device_put(device); 2250 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 2251 2252 err_fill: 2253 nlmsg_free(msg); 2254 err: 2255 ib_device_put(device); 2256 return ret; 2257 } 2258 2259 static int stat_get_doit_default_counter(struct sk_buff *skb, 2260 struct nlmsghdr *nlh, 2261 struct netlink_ext_ack *extack, 2262 struct nlattr *tb[]) 2263 { 2264 struct rdma_hw_stats *stats; 2265 struct nlattr *table_attr; 2266 struct ib_device *device; 2267 int ret, num_cnts, i; 2268 struct sk_buff *msg; 2269 u32 index, port; 2270 u64 v; 2271 2272 if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 2273 return -EINVAL; 2274 2275 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2276 device = ib_device_get_by_index(sock_net(skb->sk), index); 2277 if (!device) 2278 return -EINVAL; 2279 2280 if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) { 2281 ret = -EINVAL; 2282 goto err; 2283 } 2284 2285 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 2286 stats = ib_get_hw_stats_port(device, port); 2287 if (!stats) { 2288 ret = -EINVAL; 2289 goto err; 2290 } 2291 2292 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2293 if (!msg) { 2294 ret = -ENOMEM; 2295 goto err; 2296 } 2297 2298 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 2299 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 2300 RDMA_NLDEV_CMD_STAT_GET), 2301 0, 0); 2302 2303 if (!nlh || fill_nldev_handle(msg, device) || 2304 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { 2305 ret = -EMSGSIZE; 2306 goto err_msg; 2307 } 2308 2309 mutex_lock(&stats->lock); 2310 
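	/*
	 * Ask the driver to refresh this port's HW counters; the callback
	 * returns the number of counters written into stats->value[], or a
	 * negative value on error.
	 */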
2311 num_cnts = device->ops.get_hw_stats(device, stats, port, 0); 2312 if (num_cnts < 0) { 2313 ret = -EINVAL; 2314 goto err_stats; 2315 } 2316 2317 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 2318 if (!table_attr) { 2319 ret = -EMSGSIZE; 2320 goto err_stats; 2321 } 2322 for (i = 0; i < num_cnts; i++) { 2323 if (test_bit(i, stats->is_disabled)) 2324 continue; 2325 2326 v = stats->value[i] + 2327 rdma_counter_get_hwstat_value(device, port, i); 2328 if (rdma_nl_stat_hwcounter_entry(msg, 2329 stats->descs[i].name, v)) { 2330 ret = -EMSGSIZE; 2331 goto err_table; 2332 } 2333 } 2334 nla_nest_end(msg, table_attr); 2335 2336 mutex_unlock(&stats->lock); 2337 nlmsg_end(msg, nlh); 2338 ib_device_put(device); 2339 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 2340 2341 err_table: 2342 nla_nest_cancel(msg, table_attr); 2343 err_stats: 2344 mutex_unlock(&stats->lock); 2345 err_msg: 2346 nlmsg_free(msg); 2347 err: 2348 ib_device_put(device); 2349 return ret; 2350 } 2351 2352 static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, 2353 struct netlink_ext_ack *extack, struct nlattr *tb[]) 2354 2355 { 2356 static enum rdma_nl_counter_mode mode; 2357 static enum rdma_nl_counter_mask mask; 2358 struct ib_device *device; 2359 struct sk_buff *msg; 2360 u32 index, port; 2361 int ret; 2362 2363 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) 2364 return nldev_res_get_counter_doit(skb, nlh, extack); 2365 2366 if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] || 2367 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 2368 return -EINVAL; 2369 2370 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2371 device = ib_device_get_by_index(sock_net(skb->sk), index); 2372 if (!device) 2373 return -EINVAL; 2374 2375 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 2376 if (!rdma_is_port_valid(device, port)) { 2377 ret = -EINVAL; 2378 goto err; 2379 } 2380 2381 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2382 if (!msg) { 2383 ret = -ENOMEM; 2384 goto err; 2385 } 2386 2387 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 2388 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, 2389 RDMA_NLDEV_CMD_STAT_GET), 2390 0, 0); 2391 if (!nlh) { 2392 ret = -EMSGSIZE; 2393 goto err_msg; 2394 } 2395 2396 ret = rdma_counter_get_mode(device, port, &mode, &mask); 2397 if (ret) 2398 goto err_msg; 2399 2400 if (fill_nldev_handle(msg, device) || 2401 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 2402 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { 2403 ret = -EMSGSIZE; 2404 goto err_msg; 2405 } 2406 2407 if ((mode == RDMA_COUNTER_MODE_AUTO) && 2408 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { 2409 ret = -EMSGSIZE; 2410 goto err_msg; 2411 } 2412 2413 nlmsg_end(msg, nlh); 2414 ib_device_put(device); 2415 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 2416 2417 err_msg: 2418 nlmsg_free(msg); 2419 err: 2420 ib_device_put(device); 2421 return ret; 2422 } 2423 2424 static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 2425 struct netlink_ext_ack *extack) 2426 { 2427 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2428 int ret; 2429 2430 ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2431 nldev_policy, NL_VALIDATE_LIBERAL, extack); 2432 if (ret) 2433 return -EINVAL; 2434 2435 if (!tb[RDMA_NLDEV_ATTR_STAT_RES]) 2436 return stat_get_doit_default_counter(skb, nlh, extack, tb); 2437 2438 switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { 2439 case RDMA_NLDEV_ATTR_RES_QP: 2440 ret = stat_get_doit_qp(skb, nlh, extack, 
tb); 2441 break; 2442 case RDMA_NLDEV_ATTR_RES_MR: 2443 ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR, 2444 fill_stat_mr_entry); 2445 break; 2446 default: 2447 ret = -EINVAL; 2448 break; 2449 } 2450 2451 return ret; 2452 } 2453 2454 static int nldev_stat_get_dumpit(struct sk_buff *skb, 2455 struct netlink_callback *cb) 2456 { 2457 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2458 int ret; 2459 2460 ret = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2461 nldev_policy, NL_VALIDATE_LIBERAL, NULL); 2462 if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES]) 2463 return -EINVAL; 2464 2465 switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { 2466 case RDMA_NLDEV_ATTR_RES_QP: 2467 ret = nldev_res_get_counter_dumpit(skb, cb); 2468 break; 2469 case RDMA_NLDEV_ATTR_RES_MR: 2470 ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR, 2471 fill_stat_mr_entry); 2472 break; 2473 default: 2474 ret = -EINVAL; 2475 break; 2476 } 2477 2478 return ret; 2479 } 2480 2481 static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, 2482 struct nlmsghdr *nlh, 2483 struct netlink_ext_ack *extack) 2484 { 2485 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry; 2486 struct rdma_hw_stats *stats; 2487 struct ib_device *device; 2488 struct sk_buff *msg; 2489 u32 devid, port; 2490 int ret, i; 2491 2492 ret = __nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2493 nldev_policy, NL_VALIDATE_LIBERAL, extack); 2494 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 2495 !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 2496 return -EINVAL; 2497 2498 devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2499 device = ib_device_get_by_index(sock_net(skb->sk), devid); 2500 if (!device) 2501 return -EINVAL; 2502 2503 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); 2504 if (!rdma_is_port_valid(device, port)) { 2505 ret = -EINVAL; 2506 goto err; 2507 } 2508 2509 stats = ib_get_hw_stats_port(device, port); 2510 if (!stats) { 2511 ret = -EINVAL; 2512 goto err; 2513 } 2514 2515 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2516 if (!msg) { 2517 ret = -ENOMEM; 2518 goto err; 2519 } 2520 2521 nlh = nlmsg_put( 2522 msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 2523 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS), 2524 0, 0); 2525 2526 ret = -EMSGSIZE; 2527 if (!nlh || fill_nldev_handle(msg, device) || 2528 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) 2529 goto err_msg; 2530 2531 table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 2532 if (!table) 2533 goto err_msg; 2534 2535 mutex_lock(&stats->lock); 2536 for (i = 0; i < stats->num_counters; i++) { 2537 entry = nla_nest_start(msg, 2538 RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); 2539 if (!entry) 2540 goto err_msg_table; 2541 2542 if (nla_put_string(msg, 2543 RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, 2544 stats->descs[i].name) || 2545 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) 2546 goto err_msg_entry; 2547 2548 if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) && 2549 (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, 2550 !test_bit(i, stats->is_disabled)))) 2551 goto err_msg_entry; 2552 2553 nla_nest_end(msg, entry); 2554 } 2555 mutex_unlock(&stats->lock); 2556 2557 nla_nest_end(msg, table); 2558 nlmsg_end(msg, nlh); 2559 ib_device_put(device); 2560 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); 2561 2562 err_msg_entry: 2563 nla_nest_cancel(msg, entry); 2564 err_msg_table: 2565 mutex_unlock(&stats->lock); 2566 nla_nest_cancel(msg, table); 2567 err_msg: 2568 nlmsg_free(msg); 2569 err: 2570 ib_device_put(device); 
2571 return ret; 2572 } 2573 2574 static int nldev_newdev(struct sk_buff *skb, struct nlmsghdr *nlh, 2575 struct netlink_ext_ack *extack) 2576 { 2577 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2578 enum rdma_nl_dev_type type; 2579 struct ib_device *parent; 2580 char name[IFNAMSIZ] = {}; 2581 u32 parentid; 2582 int ret; 2583 2584 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2585 nldev_policy, extack); 2586 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || 2587 !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_DEV_TYPE]) 2588 return -EINVAL; 2589 2590 nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(name)); 2591 type = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_TYPE]); 2592 parentid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2593 parent = ib_device_get_by_index(sock_net(skb->sk), parentid); 2594 if (!parent) 2595 return -EINVAL; 2596 2597 ret = ib_add_sub_device(parent, type, name); 2598 ib_device_put(parent); 2599 2600 return ret; 2601 } 2602 2603 static int nldev_deldev(struct sk_buff *skb, struct nlmsghdr *nlh, 2604 struct netlink_ext_ack *extack) 2605 { 2606 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; 2607 struct ib_device *device; 2608 u32 devid; 2609 int ret; 2610 2611 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 2612 nldev_policy, extack); 2613 if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) 2614 return -EINVAL; 2615 2616 devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 2617 device = ib_device_get_by_index(sock_net(skb->sk), devid); 2618 if (!device) 2619 return -EINVAL; 2620 2621 return ib_del_sub_device_and_put(device); 2622 } 2623 2624 static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { 2625 [RDMA_NLDEV_CMD_GET] = { 2626 .doit = nldev_get_doit, 2627 .dump = nldev_get_dumpit, 2628 }, 2629 [RDMA_NLDEV_CMD_GET_CHARDEV] = { 2630 .doit = nldev_get_chardev, 2631 }, 2632 [RDMA_NLDEV_CMD_SET] = { 2633 .doit = nldev_set_doit, 2634 .flags = RDMA_NL_ADMIN_PERM, 2635 }, 2636 [RDMA_NLDEV_CMD_NEWLINK] = { 2637 .doit = nldev_newlink, 2638 .flags = RDMA_NL_ADMIN_PERM, 2639 }, 2640 [RDMA_NLDEV_CMD_DELLINK] = { 2641 .doit = nldev_dellink, 2642 .flags = RDMA_NL_ADMIN_PERM, 2643 }, 2644 [RDMA_NLDEV_CMD_PORT_GET] = { 2645 .doit = nldev_port_get_doit, 2646 .dump = nldev_port_get_dumpit, 2647 }, 2648 [RDMA_NLDEV_CMD_RES_GET] = { 2649 .doit = nldev_res_get_doit, 2650 .dump = nldev_res_get_dumpit, 2651 }, 2652 [RDMA_NLDEV_CMD_RES_QP_GET] = { 2653 .doit = nldev_res_get_qp_doit, 2654 .dump = nldev_res_get_qp_dumpit, 2655 }, 2656 [RDMA_NLDEV_CMD_RES_CM_ID_GET] = { 2657 .doit = nldev_res_get_cm_id_doit, 2658 .dump = nldev_res_get_cm_id_dumpit, 2659 }, 2660 [RDMA_NLDEV_CMD_RES_CQ_GET] = { 2661 .doit = nldev_res_get_cq_doit, 2662 .dump = nldev_res_get_cq_dumpit, 2663 }, 2664 [RDMA_NLDEV_CMD_RES_MR_GET] = { 2665 .doit = nldev_res_get_mr_doit, 2666 .dump = nldev_res_get_mr_dumpit, 2667 }, 2668 [RDMA_NLDEV_CMD_RES_PD_GET] = { 2669 .doit = nldev_res_get_pd_doit, 2670 .dump = nldev_res_get_pd_dumpit, 2671 }, 2672 [RDMA_NLDEV_CMD_RES_CTX_GET] = { 2673 .doit = nldev_res_get_ctx_doit, 2674 .dump = nldev_res_get_ctx_dumpit, 2675 }, 2676 [RDMA_NLDEV_CMD_RES_SRQ_GET] = { 2677 .doit = nldev_res_get_srq_doit, 2678 .dump = nldev_res_get_srq_dumpit, 2679 }, 2680 [RDMA_NLDEV_CMD_SYS_GET] = { 2681 .doit = nldev_sys_get_doit, 2682 }, 2683 [RDMA_NLDEV_CMD_SYS_SET] = { 2684 .doit = nldev_set_sys_set_doit, 2685 .flags = RDMA_NL_ADMIN_PERM, 2686 }, 2687 [RDMA_NLDEV_CMD_STAT_SET] = { 2688 .doit = nldev_stat_set_doit, 2689 .flags = RDMA_NL_ADMIN_PERM, 2690 }, 2691 [RDMA_NLDEV_CMD_STAT_GET] = { 2692 .doit = 
nldev_stat_get_doit, 2693 .dump = nldev_stat_get_dumpit, 2694 }, 2695 [RDMA_NLDEV_CMD_STAT_DEL] = { 2696 .doit = nldev_stat_del_doit, 2697 .flags = RDMA_NL_ADMIN_PERM, 2698 }, 2699 [RDMA_NLDEV_CMD_RES_QP_GET_RAW] = { 2700 .doit = nldev_res_get_qp_raw_doit, 2701 .dump = nldev_res_get_qp_raw_dumpit, 2702 .flags = RDMA_NL_ADMIN_PERM, 2703 }, 2704 [RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = { 2705 .doit = nldev_res_get_cq_raw_doit, 2706 .dump = nldev_res_get_cq_raw_dumpit, 2707 .flags = RDMA_NL_ADMIN_PERM, 2708 }, 2709 [RDMA_NLDEV_CMD_RES_MR_GET_RAW] = { 2710 .doit = nldev_res_get_mr_raw_doit, 2711 .dump = nldev_res_get_mr_raw_dumpit, 2712 .flags = RDMA_NL_ADMIN_PERM, 2713 }, 2714 [RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = { 2715 .doit = nldev_res_get_srq_raw_doit, 2716 .dump = nldev_res_get_srq_raw_dumpit, 2717 .flags = RDMA_NL_ADMIN_PERM, 2718 }, 2719 [RDMA_NLDEV_CMD_STAT_GET_STATUS] = { 2720 .doit = nldev_stat_get_counter_status_doit, 2721 }, 2722 [RDMA_NLDEV_CMD_NEWDEV] = { 2723 .doit = nldev_newdev, 2724 .flags = RDMA_NL_ADMIN_PERM, 2725 }, 2726 [RDMA_NLDEV_CMD_DELDEV] = { 2727 .doit = nldev_deldev, 2728 .flags = RDMA_NL_ADMIN_PERM, 2729 }, 2730 }; 2731 2732 static int fill_mon_netdev_association(struct sk_buff *msg, 2733 struct ib_device *device, u32 port, 2734 const struct net *net) 2735 { 2736 struct net_device *netdev = ib_device_get_netdev(device, port); 2737 int ret = 0; 2738 2739 if (netdev && !net_eq(dev_net(netdev), net)) 2740 goto out; 2741 2742 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index); 2743 if (ret) 2744 goto out; 2745 2746 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, 2747 dev_name(&device->dev)); 2748 if (ret) 2749 goto out; 2750 2751 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port); 2752 if (ret) 2753 goto out; 2754 2755 if (netdev) { 2756 ret = nla_put_u32(msg, 2757 RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); 2758 if (ret) 2759 goto out; 2760 2761 ret = nla_put_string(msg, 2762 RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); 2763 } 2764 2765 out: 2766 dev_put(netdev); 2767 return ret; 2768 } 2769 2770 static void rdma_nl_notify_err_msg(struct ib_device *device, u32 port_num, 2771 enum rdma_nl_notify_event_type type) 2772 { 2773 struct net_device *netdev; 2774 2775 switch (type) { 2776 case RDMA_REGISTER_EVENT: 2777 dev_warn_ratelimited(&device->dev, 2778 "Failed to send RDMA monitor register device event\n"); 2779 break; 2780 case RDMA_UNREGISTER_EVENT: 2781 dev_warn_ratelimited(&device->dev, 2782 "Failed to send RDMA monitor unregister device event\n"); 2783 break; 2784 case RDMA_NETDEV_ATTACH_EVENT: 2785 netdev = ib_device_get_netdev(device, port_num); 2786 dev_warn_ratelimited(&device->dev, 2787 "Failed to send RDMA monitor netdev attach event: port %d netdev %d\n", 2788 port_num, netdev->ifindex); 2789 dev_put(netdev); 2790 break; 2791 case RDMA_NETDEV_DETACH_EVENT: 2792 dev_warn_ratelimited(&device->dev, 2793 "Failed to send RDMA monitor netdev detach event: port %d\n", 2794 port_num); 2795 break; 2796 default: 2797 break; 2798 } 2799 } 2800 2801 int rdma_nl_notify_event(struct ib_device *device, u32 port_num, 2802 enum rdma_nl_notify_event_type type) 2803 { 2804 struct sk_buff *skb; 2805 struct net *net; 2806 int ret = 0; 2807 void *nlh; 2808 2809 net = read_pnet(&device->coredev.rdma_net); 2810 if (!net) 2811 return -EINVAL; 2812 2813 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2814 if (!skb) 2815 return -ENOMEM; 2816 nlh = nlmsg_put(skb, 0, 0, 2817 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR), 2818 0, 0); 2819 if (!nlh) 2820 goto 
err_free; 2821 2822 switch (type) { 2823 case RDMA_REGISTER_EVENT: 2824 case RDMA_UNREGISTER_EVENT: 2825 ret = fill_nldev_handle(skb, device); 2826 if (ret) 2827 goto err_free; 2828 break; 2829 case RDMA_NETDEV_ATTACH_EVENT: 2830 case RDMA_NETDEV_DETACH_EVENT: 2831 ret = fill_mon_netdev_association(skb, device, 2832 port_num, net); 2833 if (ret) 2834 goto err_free; 2835 break; 2836 default: 2837 break; 2838 } 2839 2840 ret = nla_put_u8(skb, RDMA_NLDEV_ATTR_EVENT_TYPE, type); 2841 if (ret) 2842 goto err_free; 2843 2844 nlmsg_end(skb, nlh); 2845 ret = rdma_nl_multicast(net, skb, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL); 2846 if (ret && ret != -ESRCH) { 2847 skb = NULL; /* skb is freed in the netlink send-op handling */ 2848 goto err_free; 2849 } 2850 return 0; 2851 2852 err_free: 2853 rdma_nl_notify_err_msg(device, port_num, type); 2854 nlmsg_free(skb); 2855 return ret; 2856 } 2857 2858 void __init nldev_init(void) 2859 { 2860 rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); 2861 } 2862 2863 void nldev_exit(void) 2864 { 2865 rdma_nl_unregister(RDMA_NL_NLDEV); 2866 } 2867 2868 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5); 2869
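/*
 * Hedged usage sketch, deliberately kept under "#if 0" so it is never
 * compiled into this module: one way a userspace tool *might* exercise the
 * RDMA_NLDEV_CMD_SYS_GET op handled above over a raw NETLINK_RDMA socket,
 * using only the installed uapi headers (<linux/netlink.h> and
 * <rdma/rdma_netlink.h>).  The program structure and the minimal error
 * handling are illustrative assumptions, not an official example.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>	/* RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_GET */

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct nlmsghdr req = {
		.nlmsg_len = NLMSG_LENGTH(0),	/* header only, no attributes */
		.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					       RDMA_NLDEV_CMD_SYS_GET),
		.nlmsg_flags = NLM_F_REQUEST,
		.nlmsg_seq = 1,
	};
	char buf[8192];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0)
		return 1;
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
	    send(fd, &req, req.nlmsg_len, 0) < 0) {
		close(fd);
		return 1;
	}

	/* nldev_sys_get_doit() answers with a single unicast message. */
	len = recv(fd, buf, sizeof(buf), 0);
	if (len > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		printf("reply: type 0x%x, %u bytes of attributes\n",
		       nlh->nlmsg_type,
		       (unsigned int)(nlh->nlmsg_len - NLMSG_HDRLEN));
	}
	close(fd);
	return 0;
}
#endif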