// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

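/* Dump resumes from ctx->ifindex; for_each_netdev_dump() advances it as
 * devices are filled, so a multi-message dump continues where the
 * previous pass stopped.
 */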
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	unsigned long irq_suspend_timeout;
	unsigned long gro_flush_timeout;
	u32 napi_defer_hard_irqs;
	void *hdr;
	pid_t pid;

	if (!(napi->dev->flags & IFF_UP))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
			napi_defer_hard_irqs))
		goto nla_put_failure;

	irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
			 irq_suspend_timeout))
		goto nla_put_failure;

	gro_flush_timeout = napi_get_gro_flush_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
			 gro_flush_timeout))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();
	rcu_read_lock();

	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rcu_read_unlock();
	rtnl_unlock();

	if (err) {
		goto err_free_msg;
	} else if (!rsp->len) {
		err = -ENOENT;
		goto err_free_msg;
	}

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

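/* ctx->napi_id records the last NAPI instance already dumped for this
 * device; IDs at or above it are skipped when the dump is continued.
 */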
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	unsigned int prev_id;
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	prev_id = UINT_MAX;
	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (napi->napi_id < MIN_NAPI_ID)
			continue;

		/* Dump continuation below depends on the list being sorted */
		WARN_ON_ONCE(napi->napi_id >= prev_id);
		prev_id = napi->napi_id;

		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}
	rtnl_unlock();

	return err;
}

static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
	u64 irq_suspend_timeout = 0;
	u64 gro_flush_timeout = 0;
	u32 defer = 0;

	if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
		defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
		napi_set_defer_hard_irqs(napi, defer);
	}

	if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
		irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
		napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
	}

	if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
		gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
		napi_set_gro_flush_timeout(napi, gro_flush_timeout);
	}

	return 0;
}

int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	unsigned int napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rtnl_lock();
	rcu_read_lock();

	napi = netdev_napi_by_id(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_set_config(napi, info);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	rcu_read_unlock();
	rtnl_unlock();

	return err;
}

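/* Fill one queue entry; RX queues also report their NAPI instance and,
 * if present, the ID of the devmem dmabuf bound to the queue.
 */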
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;

		binding = rxq->mp_params.mp_priv;
		if (binding &&
		    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
			goto nla_put_failure;

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err;

	if (!(netdev->flags & IFF_UP))
		return -ENOENT;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
	}
	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	return err;
}

#define NETDEV_STAT_NOT_SET	(~0ULL)

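/* Stats structs are pre-filled with NETDEV_STAT_NOT_SET (all 0xff) before
 * calling the driver; counters left at NETDEV_STAT_NOT_SET are treated as
 * unsupported and are neither summed by netdev_nl_stats_add() nor emitted
 * by netdev_stat_put().
 */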
static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

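/* Emit one message for a single queue; if the driver leaves every counter
 * untouched the message is cancelled and the queue is silently skipped.
 */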
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum, rx;
	struct netdev_queue_stats_tx tx_sum, tx;
	const struct netdev_stat_ops *ops;
	void *hdr;
	int i;

	ops = netdev->stat_ops;
	/* Netdev can't guarantee any complete counters */
	if (!ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
	}
	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
	}

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

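/* Without NETDEV_A_QSTATS_SCOPE the dump reports per-netdev totals;
 * with NETDEV_QSTATS_SCOPE_QUEUE it reports one message per queue.
 */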
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev && netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = netdev ? -EOPNOTSUPP : -ENODEV;
		}
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
			if (err < 0)
				break;
		}
	}
	rtnl_unlock();

	return err;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	struct list_head *sock_binding_list;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
					     NETLINK_CB(skb).sk);
	if (IS_ERR(sock_binding_list))
		return PTR_ERR(sock_binding_list);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (!netdev || !netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock;
	}

	if (dev_xdp_prog_count(netdev)) {
		NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
		err = -EEXIST;
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

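	/* Each NETDEV_A_DMABUF_QUEUES attribute is a nested queue id/type
	 * pair; only RX queues may be bound to the dmabuf.
	 */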
	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	list_add(&binding->list, sock_binding_list);

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	rtnl_unlock();

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	rtnl_unlock();
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct list_head *priv)
{
	INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;

	list_for_each_entry_safe(binding, temp, priv, list) {
		rtnl_lock();
		net_devmem_unbind_dmabuf(binding);
		rtnl_unlock();
	}
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call	= netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);