// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo)				\
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo)	\
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}
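/*
 * Example: the DEV_GET doit above can be exercised from userspace with the
 * in-tree YNL CLI (paths relative to the kernel source tree; the ifindex
 * value is illustrative):
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --do dev-get --json '{"ifindex": 2}'
 *
 * The reply carries the attributes filled in by netdev_nl_dev_fill(),
 * e.g. xdp-features, xdp-rx-metadata-features and xsk-features.
 */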
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	u32 napi_defer_hard_irqs;
	void *hdr;
	pid_t pid;

	if (WARN_ON_ONCE(!napi->dev))
		return -EINVAL;
	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (napi->napi_id >= MIN_NAPI_ID &&
	    nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
			napi_defer_hard_irqs))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	napi = napi_by_id(napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}
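/*
 * Example: querying a single NAPI instance through the doit above. The
 * napi-id value is illustrative; valid IDs can be discovered with a
 * napi-get dump filtered by ifindex:
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --do napi-get --json '{"napi-id": 345}'
 */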
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;

		binding = rxq->mp_params.mp_priv;
		if (binding &&
		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
			goto nla_put_failure;

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}
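/*
 * Example: fetching one queue's attributes via the QUEUE_GET doit above
 * (values illustrative). The napi-id / dmabuf attributes only appear when
 * the queue has a NAPI instance or a devmem binding attached:
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --do queue-get --json '{"ifindex": 2, "id": 0, "type": "rx"}'
 */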
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;
	int i;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
		/* Resume from the next queue; recording the just-dumped
		 * index would make the next dump pass emit it again.
		 */
		ctx->rxq_idx = ++i;
	}
	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

#define NETDEV_STAT_NOT_SET		(~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}
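/*
 * How the ~0ULL sentinel composes: stats structs are memset() to 0xff, so
 * every field starts as NETDEV_STAT_NOT_SET. A field stays NOT_SET unless
 * the driver writes it, netdev_nl_stats_add() only sums fields set on both
 * sides, and netdev_stat_put() silently skips NOT_SET fields. A minimal
 * sketch of the resulting per-field behaviour (hypothetical values):
 *
 *	struct netdev_queue_stats_rx sum, q;
 *
 *	memset(&sum, 0xff, sizeof(sum));
 *	sum.packets = 0;		// base stat provided by the driver
 *	memset(&q, 0xff, sizeof(q));
 *	q.packets = 10;			// per-queue stat; alloc_fail unset
 *	netdev_nl_stats_add(&sum, &q, sizeof(q));
 *	// sum.packets == 10; sum.alloc_fail stays NOT_SET -> not dumped
 */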
static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		/* As in the queue dump, resume from the next index. */
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}
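/*
 * The queue-stats ops consumed above come from the driver. A minimal
 * sketch of a hypothetical "foo" driver wiring them up (struct foo_priv
 * and its counters are assumptions, not part of this file); fields the
 * driver does not touch keep the NOT_SET pattern and are omitted from
 * the dump:
 *
 *	static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
 *					   struct netdev_queue_stats_rx *rx)
 *	{
 *		struct foo_priv *fp = netdev_priv(dev);
 *
 *		rx->packets = fp->rx_ring[idx].packets;
 *		rx->bytes = fp->rx_ring[idx].bytes;
 *	}
 *
 *	static void foo_get_base_stats(struct net_device *dev,
 *				       struct netdev_queue_stats_rx *rx,
 *				       struct netdev_queue_stats_tx *tx)
 *	{
 *		rx->packets = 0;	// accumulate dead-queue stats here
 *		rx->bytes = 0;
 *	}
 *
 *	static const struct netdev_stat_ops foo_stat_ops = {
 *		.get_queue_stats_rx	= foo_get_queue_stats_rx,
 *		.get_base_stats		= foo_get_base_stats,
 *	};
 *
 *	// at probe time: dev->stat_ops = &foo_stat_ops;
 *
 * Note netdev_nl_stats_by_netdev() below refuses to aggregate without
 * ->get_base_stats(), since per-queue counters alone miss queues that
 * have been torn down.
 */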
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}
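/*
 * Example: dumping queue stats for all capable devices. The scope
 * attribute selects the per-queue breakdown; omit it for the per-netdev
 * totals built above:
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --dump qstats-get --json '{"scope": "queue"}'
 */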
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	struct list_head *sock_binding_list;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
					     NETLINK_CB(skb).sk);
	if (IS_ERR(sock_binding_list))
		return PTR_ERR(sock_binding_list);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (!netdev || !netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock;
	}

	if (dev_xdp_prog_count(netdev)) {
		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
		err = -EEXIST;
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	list_add(&binding->list, sock_binding_list);

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	rtnl_unlock();

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	rtnl_unlock();
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct list_head *priv)
{
	INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;

	list_for_each_entry_safe(binding, temp, priv, list) {
		rtnl_lock();
		net_devmem_unbind_dmabuf(binding);
		rtnl_unlock();
	}
}
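/*
 * Example: binding a dma-buf to RX queue 14 of ifindex 2 through the doit
 * above. The fd must reference an already-exported dma-buf; all values
 * here are illustrative and the attribute names follow
 * Documentation/netlink/specs/netdev.yaml:
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --do bind-rx \
 *         --json '{"ifindex": 2, "fd": 9, "queues": [{"id": 14, "type": "rx"}]}'
 *
 * The binding lives as long as the netlink socket that created it;
 * netdev_nl_sock_priv_destroy() above unbinds on socket close.
 */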
static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call	= netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);
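/*
 * Example: the notifier above feeds the "mgmt" multicast group, so
 * dev-add/del/change notifications can be observed with the YNL CLI
 * (assuming its --subscribe option; flag spelling may differ across
 * versions):
 *
 *   $ ./tools/net/ynl/cli.py \
 *         --spec Documentation/netlink/specs/netdev.yaml \
 *         --subscribe mgmt --sleep 10
 */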