Lines Matching refs:rsp

33 netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,  in netdev_nl_dev_fill()  argument
40 hdr = genlmsg_iput(rsp, info); in netdev_nl_dev_fill()
57 if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || in netdev_nl_dev_fill()
58 nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES, in netdev_nl_dev_fill()
60 nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, in netdev_nl_dev_fill()
62 nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES, in netdev_nl_dev_fill()
67 if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, in netdev_nl_dev_fill()
72 genlmsg_end(rsp, hdr); in netdev_nl_dev_fill()
77 genlmsg_cancel(rsp, hdr); in netdev_nl_dev_fill()
109 struct sk_buff *rsp; in netdev_nl_dev_get_doit() local
118 rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); in netdev_nl_dev_get_doit()
119 if (!rsp) in netdev_nl_dev_get_doit()
126 err = netdev_nl_dev_fill(netdev, rsp, info); in netdev_nl_dev_get_doit()
135 return genlmsg_reply(rsp, info); in netdev_nl_dev_get_doit()
138 nlmsg_free(rsp); in netdev_nl_dev_get_doit()
161 netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, in netdev_nl_napi_fill_one() argument
173 hdr = genlmsg_iput(rsp, info); in netdev_nl_napi_fill_one()
177 if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id)) in netdev_nl_napi_fill_one()
180 if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex)) in netdev_nl_napi_fill_one()
183 if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) in netdev_nl_napi_fill_one()
188 if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) in netdev_nl_napi_fill_one()
193 if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS, in netdev_nl_napi_fill_one()
198 if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, in netdev_nl_napi_fill_one()
203 if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, in netdev_nl_napi_fill_one()
207 genlmsg_end(rsp, hdr); in netdev_nl_napi_fill_one()
212 genlmsg_cancel(rsp, hdr); in netdev_nl_napi_fill_one()
219 struct sk_buff *rsp; in netdev_nl_napi_get_doit() local
228 rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); in netdev_nl_napi_get_doit()
229 if (!rsp) in netdev_nl_napi_get_doit()
234 err = netdev_nl_napi_fill_one(rsp, napi, info); in netdev_nl_napi_get_doit()
243 } else if (!rsp->len) { in netdev_nl_napi_get_doit()
248 return genlmsg_reply(rsp, info); in netdev_nl_napi_get_doit()
251 nlmsg_free(rsp); in netdev_nl_napi_get_doit()
256 netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_napi_dump_one() argument
279 err = netdev_nl_napi_fill_one(rsp, napi, info); in netdev_nl_napi_dump_one()
368 netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, in netdev_nl_queue_fill_one() argument
376 hdr = genlmsg_iput(rsp, info); in netdev_nl_queue_fill_one()
380 if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) || in netdev_nl_queue_fill_one()
381 nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) || in netdev_nl_queue_fill_one()
382 nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) in netdev_nl_queue_fill_one()
388 if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID, in netdev_nl_queue_fill_one()
394 nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id)) in netdev_nl_queue_fill_one()
400 if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID, in netdev_nl_queue_fill_one()
405 genlmsg_end(rsp, hdr); in netdev_nl_queue_fill_one()
410 genlmsg_cancel(rsp, hdr); in netdev_nl_queue_fill_one()
430 netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, in netdev_nl_queue_fill() argument
442 return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); in netdev_nl_queue_fill()
449 struct sk_buff *rsp; in netdev_nl_queue_get_doit() local
461 rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); in netdev_nl_queue_get_doit()
462 if (!rsp) in netdev_nl_queue_get_doit()
469 err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); in netdev_nl_queue_get_doit()
480 return genlmsg_reply(rsp, info); in netdev_nl_queue_get_doit()
483 nlmsg_free(rsp); in netdev_nl_queue_get_doit()
488 netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_queue_dump_one() argument
498 err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, in netdev_nl_queue_dump_one()
504 err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, in netdev_nl_queue_dump_one()
564 static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value) in netdev_stat_put() argument
568 return nla_put_uint(rsp, attr_id, value); in netdev_stat_put()
572 netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) in netdev_nl_stats_write_rx() argument
574 if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) || in netdev_nl_stats_write_rx()
575 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) || in netdev_nl_stats_write_rx()
576 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || in netdev_nl_stats_write_rx()
577 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || in netdev_nl_stats_write_rx()
578 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || in netdev_nl_stats_write_rx()
579 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || in netdev_nl_stats_write_rx()
580 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || in netdev_nl_stats_write_rx()
581 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || in netdev_nl_stats_write_rx()
582 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) || in netdev_nl_stats_write_rx()
583 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) || in netdev_nl_stats_write_rx()
584 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) || in netdev_nl_stats_write_rx()
585 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) || in netdev_nl_stats_write_rx()
586 netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits)) in netdev_nl_stats_write_rx()
592 netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx) in netdev_nl_stats_write_tx() argument
594 if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) || in netdev_nl_stats_write_tx()
595 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) || in netdev_nl_stats_write_tx()
596 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) || in netdev_nl_stats_write_tx()
597 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) || in netdev_nl_stats_write_tx()
598 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) || in netdev_nl_stats_write_tx()
599 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) || in netdev_nl_stats_write_tx()
600 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) || in netdev_nl_stats_write_tx()
601 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) || in netdev_nl_stats_write_tx()
602 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) || in netdev_nl_stats_write_tx()
603 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) || in netdev_nl_stats_write_tx()
604 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) || in netdev_nl_stats_write_tx()
605 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) || in netdev_nl_stats_write_tx()
606 netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake)) in netdev_nl_stats_write_tx()
612 netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_stats_queue() argument
620 hdr = genlmsg_iput(rsp, info); in netdev_nl_stats_queue()
623 if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || in netdev_nl_stats_queue()
624 nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) || in netdev_nl_stats_queue()
625 nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i)) in netdev_nl_stats_queue()
634 if (netdev_nl_stats_write_rx(rsp, &rx)) in netdev_nl_stats_queue()
642 if (netdev_nl_stats_write_tx(rsp, &tx)) in netdev_nl_stats_queue()
647 genlmsg_end(rsp, hdr); in netdev_nl_stats_queue()
651 genlmsg_cancel(rsp, hdr); in netdev_nl_stats_queue()
654 genlmsg_cancel(rsp, hdr); in netdev_nl_stats_queue()
659 netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_stats_by_queue() argument
671 err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, in netdev_nl_stats_by_queue()
679 err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, in netdev_nl_stats_by_queue()
692 netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, in netdev_nl_stats_by_netdev() argument
716 hdr = genlmsg_iput(rsp, info); in netdev_nl_stats_by_netdev()
719 if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) in netdev_nl_stats_by_netdev()
735 if (netdev_nl_stats_write_rx(rsp, &rx_sum) || in netdev_nl_stats_by_netdev()
736 netdev_nl_stats_write_tx(rsp, &tx_sum)) in netdev_nl_stats_by_netdev()
739 genlmsg_end(rsp, hdr); in netdev_nl_stats_by_netdev()
743 genlmsg_cancel(rsp, hdr); in netdev_nl_stats_by_netdev()
815 struct sk_buff *rsp; in netdev_nl_bind_rx_doit() local
833 rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); in netdev_nl_bind_rx_doit()
834 if (!rsp) in netdev_nl_bind_rx_doit()
837 hdr = genlmsg_iput(rsp, info); in netdev_nl_bind_rx_doit()
894 nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); in netdev_nl_bind_rx_doit()
895 genlmsg_end(rsp, hdr); in netdev_nl_bind_rx_doit()
897 err = genlmsg_reply(rsp, info); in netdev_nl_bind_rx_doit()
910 nlmsg_free(rsp); in netdev_nl_bind_rx_doit()
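
The references above all follow one reply-building pattern: the .._get_doit handlers allocate rsp with genlmsg_new(), start the message with genlmsg_iput(), fill attributes with nla_put_*()/netdev_stat_put(), then finish with genlmsg_end() and genlmsg_reply(), cancelling or freeing on error. The sketch below distills that shape only; it is not the actual netdev-genl code. The handler name example_get_doit and the attribute EXAMPLE_A_VALUE are hypothetical placeholders; the real handlers (e.g. netdev_nl_dev_get_doit) put NETDEV_A_* attributes and hold the appropriate locks around the fill step.

    #include <net/genetlink.h>
    #include <net/netlink.h>

    #define EXAMPLE_A_VALUE 1 /* hypothetical attribute id, for illustration */

    /* Minimal sketch of the rsp alloc/fill/reply pattern seen in the
     * listing above; assumes a registered genl family and a valid info.
     */
    static int example_get_doit(struct sk_buff *skb, struct genl_info *info)
    {
    	struct sk_buff *rsp;
    	void *hdr;
    	int err;

    	/* allocate the reply message, as the real doit handlers do */
    	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
    	if (!rsp)
    		return -ENOMEM;

    	/* put the genetlink header derived from the request info */
    	hdr = genlmsg_iput(rsp, info);
    	if (!hdr) {
    		err = -EMSGSIZE;
    		goto err_free;
    	}

    	/* fill attributes; on failure undo the partial message */
    	if (nla_put_u32(rsp, EXAMPLE_A_VALUE, 42)) {
    		genlmsg_cancel(rsp, hdr);
    		err = -EMSGSIZE;
    		goto err_free;
    	}

    	/* close the message and send it back to the requester */
    	genlmsg_end(rsp, hdr);
    	return genlmsg_reply(rsp, info);

    err_free:
    	nlmsg_free(rsp);
    	return err;
    }

The dump-side helpers in the listing (netdev_nl_napi_dump_one, netdev_nl_queue_dump_one, netdev_nl_stats_queue) reuse the same fill functions but write into the dump skb supplied by the netlink core instead of allocating rsp themselves, which is why only the doit handlers pair genlmsg_new() with genlmsg_reply()/nlmsg_free().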