// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/busy_poll.h>

#include "netdev-genl-gen.h"
#include "dev.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}
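/* Example of driving the dump path below from userspace (a sketch; it
 * assumes the in-tree YNL CLI and the netdev family spec at their usual
 * paths):
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml --dump dev-get
 *
 * Each multi-part continuation re-enters netdev_nl_dev_get_dumpit() with
 * the same cb->ctx, so ctx->ifindex remembers the last device filled and
 * for_each_netdev_dump() resumes from the next one.
 */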
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	void *hdr;
	pid_t pid;

	if (WARN_ON_ONCE(!napi->dev))
		return -EINVAL;
	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (napi->napi_id >= MIN_NAPI_ID &&
	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	napi = napi_by_id(napi_id);
	if (napi)
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	else
		err = -EINVAL;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		/* NAPIs are added to the head of napi_list, so the walk
		 * sees IDs in descending order; skipping everything at or
		 * above the last dumped ID resumes the dump exactly where
		 * the previous message left off.
		 */
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}
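/* Example request against the NAPI handlers above (a sketch; it assumes
 * the in-tree YNL CLI and spec, and that ifindex 2 exists and is up):
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --dump napi-get --json '{"ifindex": 2}'
 *
 * With "ifindex" present this takes the __dev_get_by_index() branch of
 * netdev_nl_napi_get_dumpit(); without it, every device in the netns is
 * walked and ctx->napi_id is reset between devices.
 */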
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;
	int i;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
		/* Advance the resume point past the queue just dumped;
		 * saving the pre-increment value would make the next dump
		 * message repeat this queue.
		 */
		ctx->rxq_idx = ++i;
	}
	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	return err;
}
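/* Example request against netdev_nl_queue_get_doit() above (a sketch; it
 * assumes the in-tree YNL CLI and spec; the ifindex and queue values are
 * illustrative):
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --do queue-get \
 *         --json '{"ifindex": 2, "type": "rx", "id": 0}'
 *
 * Out-of-range ids fail netdev_nl_queue_validate() with -EINVAL, and
 * queues of devices that are down are not reported at all.
 */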
int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

/* All-ones means "this counter was never set", both in the structs
 * pre-filled by the callers in this file and in the summed results;
 * netdev_stat_put() silently skips such counters.
 */
#define NETDEV_STAT_NOT_SET		(~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	/* Both stats structs are plain arrays of u64 counters, so walk
	 * them in 8-byte steps.
	 */
	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}
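/* Sketch of the producer side these writers consume. Names prefixed foo_
 * are hypothetical, invented for illustration; the ops structure and the
 * callback signatures are the real ones from <net/netdev_queues.h>. A
 * driver fills in only the counters it tracks and leaves the rest at the
 * NETDEV_STAT_NOT_SET value pre-set by the callers in this file:
 *
 *   static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
 *                                      struct netdev_queue_stats_rx *rx)
 *   {
 *           struct foo_ring *ring = &foo_priv(dev)->rx_rings[idx];
 *
 *           rx->packets = ring->packets;
 *           rx->bytes = ring->bytes;
 *           // alloc_fail etc. left untouched -> skipped by netdev_stat_put()
 *   }
 *
 *   static const struct netdev_stat_ops foo_stat_ops = {
 *           .get_queue_stats_rx = foo_get_queue_stats_rx,
 *           .get_queue_stats_tx = foo_get_queue_stats_tx,
 *           .get_base_stats     = foo_get_base_stats,
 *   };
 *
 * The driver points netdev->stat_ops at this structure before
 * register_netdev().
 */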
static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	/* Pre-fill with NETDEV_STAT_NOT_SET; a driver that reports nothing
	 * leaves the struct untouched and the message is cancelled.
	 */
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		/* As in netdev_nl_queue_dump_one(), the resume index must
		 * point past the queue just dumped, not at it.
		 */
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}
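/* Device-scope stats below start from the driver's get_base_stats()
 * (seeded with NETDEV_STAT_NOT_SET), then the per-queue counters are
 * summed in with netdev_nl_stats_add(). A counter the base left at
 * NETDEV_STAT_NOT_SET stays unset no matter what the queues report, and a
 * queue that left a counter unset simply contributes nothing, so the
 * message carries exactly the counters the driver's base stats declared.
 */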
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);
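/* Usage examples for the qstats and notification paths above (sketches;
 * they assume the in-tree YNL CLI and the netdev family spec, flags as of
 * when this was written).
 *
 * Per-queue stats for every capable device in the netns:
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --dump qstats-get --json '{"scope": "queue"}'
 *
 * Watching the add/del/change notifications emitted from
 * netdev_genl_netdevice_event() via the "mgmt" multicast group:
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --subscribe mgmt --sleep 60
 */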