// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/busy_poll.h>

#include "netdev-genl-gen.h"
#include "dev.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD)) {
		genlmsg_cancel(rsp, hdr);
		return -EINVAL;
	}

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs)) {
			genlmsg_cancel(rsp, hdr);
			return -EINVAL;
		}
	}

	genlmsg_end(rsp, hdr);

	return 0;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

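/* Dump handler for device info: emits one message per netdev in the
 * requesting socket's netns, using ctx->ifindex to resume iteration
 * when the dump spans multiple recv() calls.
 */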
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	void *hdr;
	pid_t pid;

	if (WARN_ON_ONCE(!napi->dev))
		return -EINVAL;
	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (napi->napi_id >= MIN_NAPI_ID &&
	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	napi = napi_by_id(napi_id);
	if (napi)
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	else
		err = -EINVAL;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

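/* Write a single queue message: queue id, type and owning ifindex, plus
 * the id of the NAPI instance servicing the queue, if one is attached.
 */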
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;
	int i;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
		/* Record the next queue to dump, not the one just sent,
		 * otherwise a resumed dump repeats the last message.
		 */
		ctx->rxq_idx = ++i;
	}
	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	return err;
}

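/* Queue dump: with an ifindex attribute the dump covers one device,
 * otherwise it walks every device in the netns. ctx->rxq_idx/txq_idx
 * track per-device progress and are reset between devices.
 */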
int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

#define NETDEV_STAT_NOT_SET		(~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

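/* Per-queue stats scope: one message per RX and TX queue. The structs
 * are pre-filled with NETDEV_STAT_NOT_SET (all 0xff) so drivers only
 * need to set the counters they actually maintain.
 */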
static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		/* Resume from the next queue, not the one just dumped */
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		if (!netdev->stat_ops)
			continue;

		switch (scope) {
		case 0:
			err = netdev_nl_stats_by_netdev(netdev, skb, info);
			break;
		case NETDEV_QSTATS_SCOPE_QUEUE:
			err = netdev_nl_stats_by_queue(netdev, skb, info, ctx);
			break;
		}
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

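/* Translate netdevice notifier events into genetlink notifications on
 * the NETDEV_NLGRP_MGMT multicast group.
 */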
static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);