// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}
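/*
 * Hedged usage sketch (not part of the kernel sources; exact CLI flags
 * assumed from tools/net/ynl, check your tree): the dump handler below
 * is what a "list all devices" query hits, e.g.
 *
 *   ./tools/net/ynl/cli.py \
 *       --spec Documentation/netlink/specs/netdev.yaml --dump dev-get
 *
 * Each element of the reply is one netdev_nl_dev_fill() message:
 * ifindex plus the XDP/XSK feature masks assembled above.  When the
 * skb fills up the dump returns -EMSGSIZE and the netlink core
 * re-enters it, resuming the device walk from ctx->ifindex.
 */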
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	unsigned long gro_flush_timeout;
	u32 napi_defer_hard_irqs;
	void *hdr;
	pid_t pid;

	if (WARN_ON_ONCE(!napi->dev))
		return -EINVAL;
	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (napi->napi_id >= MIN_NAPI_ID &&
	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
			napi_defer_hard_irqs))
		goto nla_put_failure;

	gro_flush_timeout = napi_get_gro_flush_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
			 gro_flush_timeout))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	napi = napi_by_id(napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}
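/*
 * Note on the resume logic above: NAPI IDs are handed out in
 * increasing order and netif_napi_add() inserts at the head of
 * dev->napi_list, so netdev_nl_napi_dump_one() visits IDs in
 * (roughly) decreasing order.  ctx->napi_id records the last ID that
 * fit into the skb; on the next dump pass every ID >= that mark has
 * already been sent and is skipped.  A hedged trace, assuming one
 * device with NAPI IDs 9, 8, 7:
 *
 *   pass 1: fill 9, fill 8, 7 overflows -> ctx->napi_id == 8
 *   pass 2: skip 9, skip 8, fill 7      -> done
 */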
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;

		binding = rxq->mp_params.mp_priv;
		if (binding &&
		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
			goto nla_put_failure;

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}
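/*
 * Hedged example (attribute spellings assumed from
 * Documentation/netlink/specs/netdev.yaml): a QUEUE_GET do request
 * for RX queue 0 of ifindex 2 could look like
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *       --do queue-get --json '{"ifindex": 2, "type": "rx", "id": 0}'
 *
 * and is answered by netdev_nl_queue_fill() above.  An out-of-range
 * ID fails netdev_nl_queue_validate() with -EINVAL, while a device
 * that is down makes the fill helper bail out early, so the reply
 * carries no queue message.
 */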
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;
	int i;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
		ctx->rxq_idx = i++;
	}
	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
		ctx->txq_idx = i++;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

#define NETDEV_STAT_NOT_SET		(~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}
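/*
 * Worked example of the NETDEV_STAT_NOT_SET convention: both helpers
 * above treat an all-ones u64 as "driver did not report this field".
 * With sum.packets == 10 and a queue reporting add.packets == 5,
 * netdev_nl_stats_add() produces 15; if either side is ~0ULL the
 * field is left untouched, and netdev_stat_put() then omits the
 * attribute entirely rather than emitting 0xffffffffffffffff.  This
 * is also why the callers below memset() the stats structs to 0xff
 * before invoking the driver ops.
 */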
static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = i++;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = i++;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}
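/*
 * The device-scope path below sums per-queue stats on top of the
 * driver's base stats.  A hedged sketch of the aggregation, assuming
 * a 2-queue device:
 *
 *   base(rx) + queue0(rx) + queue1(rx)  -> one RX attribute set
 *   base(tx) + queue0(tx) + queue1(tx)  -> one TX attribute set
 *
 * Queues that report nothing contribute all-ones structs, which
 * netdev_nl_stats_add() ignores field by field.  Note the sum only
 * walks real_num_{rx,tx}_queues; stats of queues deactivated by a
 * channel reconfiguration are expected to be folded into the base
 * stats by the driver.
 */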
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}
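/*
 * Hedged request sketch for the dump handler below (exact JSON shape
 * for the scope flag assumed, check the netdev.yaml spec):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *       --dump qstats-get --json '{"scope": ["queue"]}'
 *
 * Omitting scope selects the per-netdev aggregation (case 0 above);
 * "queue" selects one message per queue.  An explicit ifindex narrows
 * the dump to one device and, as the handler shows, turns a missing
 * ->stat_ops into a hard -EOPNOTSUPP instead of a silent skip.
 */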
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	struct list_head *sock_binding_list;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
					     NETLINK_CB(skb).sk);
	if (IS_ERR(sock_binding_list))
		return PTR_ERR(sock_binding_list);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (!netdev || !netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock;
	}

	if (dev_xdp_prog_count(netdev)) {
		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
		err = -EEXIST;
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	list_add(&binding->list, sock_binding_list);

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	rtnl_unlock();

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	rtnl_unlock();
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct list_head *priv)
{
	INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;

	list_for_each_entry_safe(binding, temp, priv, list) {
		rtnl_lock();
		net_devmem_unbind_dmabuf(binding);
		rtnl_unlock();
	}
}
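/*
 * Lifetime note for the dmabuf binding above: a successful BIND_RX
 * links the binding into the requesting netlink socket's private
 * list, so the binding (and with it the memory provider on the bound
 * RX queues) lives exactly as long as that socket.  Closing the
 * socket runs netdev_nl_sock_priv_destroy() and unbinds everything,
 * which is why this file has no explicit unbind command.
 */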
static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call	= netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);