// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

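/* Dump handler for all netdevs: walks the caller's netns and resumes from
 * ctx->ifindex across multi-part dump invocations.
 */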
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	void *hdr;
	pid_t pid;

	if (WARN_ON_ONCE(!napi->dev))
		return -EINVAL;
	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (napi->napi_id >= MIN_NAPI_ID &&
	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	napi = napi_by_id(napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;

		binding = rxq->mp_params.mp_priv;
		if (binding &&
		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
			goto nla_put_failure;

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;
	int i;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
		ctx->rxq_idx = i++;
	}
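	/* RX queues are done for this pass; dump TX queues next, picking up
	 * at ctx->txq_idx.
	 */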
	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
		ctx->txq_idx = i++;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

#define NETDEV_STAT_NOT_SET		(~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = i++;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = i++;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

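	/* Fold per-queue counters into the driver-reported base stats;
	 * netdev_nl_stats_add() skips any NETDEV_STAT_NOT_SET entries.
	 */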
	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	struct list_head *sock_binding_list;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
					     NETLINK_CB(skb).sk);
	if (IS_ERR(sock_binding_list))
		return PTR_ERR(sock_binding_list);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (!netdev || !netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock;
	}

	if (dev_xdp_prog_count(netdev)) {
		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
		err = -EEXIST;
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	list_add(&binding->list, sock_binding_list);

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	rtnl_unlock();

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	rtnl_unlock();
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct list_head *priv)
{
	INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;

	list_for_each_entry_safe(binding, temp, priv, list) {
		rtnl_lock();
		net_devmem_unbind_dmabuf(binding);
		rtnl_unlock();
	}
}

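/* Forward netdevice notifier events as netlink notifications on the
 * NETDEV_NLGRP_MGMT multicast group.
 */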
static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call	= netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);