// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>

#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD)) {
		genlmsg_cancel(rsp, hdr);
		return -EINVAL;
	}

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs)) {
			genlmsg_cancel(rsp, hdr);
			return -EINVAL;
		}
	}

	genlmsg_end(rsp, hdr);

	return 0;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_dev_fill(netdev, rsp, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

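/* Dump handler for device info: walks every net_device in the caller's
 * netns under the RTNL lock and emits one message per device.  The walk
 * position is kept in ctx->ifindex so a multi-part dump resumes where the
 * previous skb ran out of room (-EMSGSIZE).
 */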
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			break;
	}
	rtnl_unlock();

	if (err != -EMSGSIZE)
		return err;

	return skb->len;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	return -EOPNOTSUPP;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	return -EOPNOTSUPP;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     rxq->napi->napi_id))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
					     txq->napi->napi_id))
			goto nla_put_failure;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err = 0;

	if (!(netdev->flags & IFF_UP))
		return err;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	rtnl_lock();

	netdev = __dev_get_by_index(genl_info_net(info), ifindex);
	if (netdev)
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
	else
		err = -ENODEV;

	rtnl_unlock();

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

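/* Emit one message per RX queue and one per TX queue of a single device;
 * queues of devices that are not UP are skipped.  ctx->rxq_idx and
 * ctx->txq_idx record how far the per-device walk got so a multi-part dump
 * can pick up again; the caller resets them before moving to the next device.
 */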
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;
	int i;

	if (!(netdev->flags & IFF_UP))
		return err;

	for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
		ctx->rxq_idx = i++;
	}
	for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
		err = netdev_nl_queue_fill_one(rsp, netdev, i,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
		ctx->txq_idx = i++;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rtnl_lock();
	if (ifindex) {
		netdev = __dev_get_by_index(net, ifindex);
		if (netdev)
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
		else
			err = -ENODEV;
	} else {
		for_each_netdev_dump(net, netdev, ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}
	rtnl_unlock();

	if (err != -EMSGSIZE)
		return err;

	return skb->len;
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		break;
	case NETDEV_UNREGISTER:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);