1 /* 2 * Copyright (c) 2007-2014 Nicira, Inc. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public License 14 * along with this program; if not, write to the Free Software 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * 02110-1301, USA 17 */ 18 19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 21 #include <linux/init.h> 22 #include <linux/module.h> 23 #include <linux/if_arp.h> 24 #include <linux/if_vlan.h> 25 #include <linux/in.h> 26 #include <linux/ip.h> 27 #include <linux/jhash.h> 28 #include <linux/delay.h> 29 #include <linux/time.h> 30 #include <linux/etherdevice.h> 31 #include <linux/genetlink.h> 32 #include <linux/kernel.h> 33 #include <linux/kthread.h> 34 #include <linux/mutex.h> 35 #include <linux/percpu.h> 36 #include <linux/rcupdate.h> 37 #include <linux/tcp.h> 38 #include <linux/udp.h> 39 #include <linux/ethtool.h> 40 #include <linux/wait.h> 41 #include <asm/div64.h> 42 #include <linux/highmem.h> 43 #include <linux/netfilter_bridge.h> 44 #include <linux/netfilter_ipv4.h> 45 #include <linux/inetdevice.h> 46 #include <linux/list.h> 47 #include <linux/openvswitch.h> 48 #include <linux/rculist.h> 49 #include <linux/dmi.h> 50 #include <net/genetlink.h> 51 #include <net/net_namespace.h> 52 #include <net/netns/generic.h> 53 54 #include "datapath.h" 55 #include "flow.h" 56 #include "flow_table.h" 57 #include "flow_netlink.h" 58 #include "vport-internal_dev.h" 59 #include "vport-netdev.h" 60 61 unsigned int ovs_net_id __read_mostly; 62 63 static struct genl_family dp_packet_genl_family; 64 static struct genl_family dp_flow_genl_family; 65 static struct genl_family dp_datapath_genl_family; 66 67 static const struct nla_policy flow_policy[]; 68 69 static const struct genl_multicast_group ovs_dp_flow_multicast_group = { 70 .name = OVS_FLOW_MCGROUP, 71 }; 72 73 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = { 74 .name = OVS_DATAPATH_MCGROUP, 75 }; 76 77 static const struct genl_multicast_group ovs_dp_vport_multicast_group = { 78 .name = OVS_VPORT_MCGROUP, 79 }; 80 81 /* Check if need to build a reply message. 82 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */ 83 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info, 84 unsigned int group) 85 { 86 return info->nlhdr->nlmsg_flags & NLM_F_ECHO || 87 genl_has_listeners(family, genl_info_net(info), group); 88 } 89 90 static void ovs_notify(struct genl_family *family, 91 struct sk_buff *skb, struct genl_info *info) 92 { 93 genl_notify(family, skb, info, 0, GFP_KERNEL); 94 } 95 96 /** 97 * DOC: Locking: 98 * 99 * All writes e.g. Writes to device state (add/remove datapath, port, set 100 * operations on vports, etc.), Writes to other state (flow table 101 * modifications, set miscellaneous datapath parameters, etc.) are protected 102 * by ovs_lock. 103 * 104 * Reads are protected by RCU. 105 * 106 * There are a few special cases (mostly stats) that have their own 107 * synchronization but they nest under all of above and don't interact with 108 * each other. 109 * 110 * The RTNL lock nests inside ovs_mutex. 111 */ 112 113 static DEFINE_MUTEX(ovs_mutex); 114 115 void ovs_lock(void) 116 { 117 mutex_lock(&ovs_mutex); 118 } 119 120 void ovs_unlock(void) 121 { 122 mutex_unlock(&ovs_mutex); 123 } 124 125 #ifdef CONFIG_LOCKDEP 126 int lockdep_ovsl_is_held(void) 127 { 128 if (debug_locks) 129 return lockdep_is_held(&ovs_mutex); 130 else 131 return 1; 132 } 133 #endif 134 135 static struct vport *new_vport(const struct vport_parms *); 136 static int queue_gso_packets(struct datapath *dp, struct sk_buff *, 137 const struct sw_flow_key *, 138 const struct dp_upcall_info *, 139 uint32_t cutlen); 140 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, 141 const struct sw_flow_key *, 142 const struct dp_upcall_info *, 143 uint32_t cutlen); 144 145 /* Must be called with rcu_read_lock. */ 146 static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex) 147 { 148 struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex); 149 150 if (dev) { 151 struct vport *vport = ovs_internal_dev_get_vport(dev); 152 if (vport) 153 return vport->dp; 154 } 155 156 return NULL; 157 } 158 159 /* The caller must hold either ovs_mutex or rcu_read_lock to keep the 160 * returned dp pointer valid. 161 */ 162 static inline struct datapath *get_dp(struct net *net, int dp_ifindex) 163 { 164 struct datapath *dp; 165 166 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held()); 167 rcu_read_lock(); 168 dp = get_dp_rcu(net, dp_ifindex); 169 rcu_read_unlock(); 170 171 return dp; 172 } 173 174 /* Must be called with rcu_read_lock or ovs_mutex. */ 175 const char *ovs_dp_name(const struct datapath *dp) 176 { 177 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL); 178 return ovs_vport_name(vport); 179 } 180 181 static int get_dpifindex(const struct datapath *dp) 182 { 183 struct vport *local; 184 int ifindex; 185 186 rcu_read_lock(); 187 188 local = ovs_vport_rcu(dp, OVSP_LOCAL); 189 if (local) 190 ifindex = local->dev->ifindex; 191 else 192 ifindex = 0; 193 194 rcu_read_unlock(); 195 196 return ifindex; 197 } 198 199 static void destroy_dp_rcu(struct rcu_head *rcu) 200 { 201 struct datapath *dp = container_of(rcu, struct datapath, rcu); 202 203 ovs_flow_tbl_destroy(&dp->table); 204 free_percpu(dp->stats_percpu); 205 kfree(dp->ports); 206 kfree(dp); 207 } 208 209 static struct hlist_head *vport_hash_bucket(const struct datapath *dp, 210 u16 port_no) 211 { 212 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)]; 213 } 214 215 /* Called with ovs_mutex or RCU read lock. */ 216 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) 217 { 218 struct vport *vport; 219 struct hlist_head *head; 220 221 head = vport_hash_bucket(dp, port_no); 222 hlist_for_each_entry_rcu(vport, head, dp_hash_node) { 223 if (vport->port_no == port_no) 224 return vport; 225 } 226 return NULL; 227 } 228 229 /* Called with ovs_mutex. */ 230 static struct vport *new_vport(const struct vport_parms *parms) 231 { 232 struct vport *vport; 233 234 vport = ovs_vport_add(parms); 235 if (!IS_ERR(vport)) { 236 struct datapath *dp = parms->dp; 237 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no); 238 239 hlist_add_head_rcu(&vport->dp_hash_node, head); 240 } 241 return vport; 242 } 243 244 void ovs_dp_detach_port(struct vport *p) 245 { 246 ASSERT_OVSL(); 247 248 /* First drop references to device. */ 249 hlist_del_rcu(&p->dp_hash_node); 250 251 /* Then destroy it. */ 252 ovs_vport_del(p); 253 } 254 255 /* Must be called with rcu_read_lock. */ 256 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) 257 { 258 const struct vport *p = OVS_CB(skb)->input_vport; 259 struct datapath *dp = p->dp; 260 struct sw_flow *flow; 261 struct sw_flow_actions *sf_acts; 262 struct dp_stats_percpu *stats; 263 u64 *stats_counter; 264 u32 n_mask_hit; 265 266 stats = this_cpu_ptr(dp->stats_percpu); 267 268 /* Look up flow. */ 269 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit); 270 if (unlikely(!flow)) { 271 struct dp_upcall_info upcall; 272 int error; 273 274 memset(&upcall, 0, sizeof(upcall)); 275 upcall.cmd = OVS_PACKET_CMD_MISS; 276 upcall.portid = ovs_vport_find_upcall_portid(p, skb); 277 upcall.mru = OVS_CB(skb)->mru; 278 error = ovs_dp_upcall(dp, skb, key, &upcall, 0); 279 if (unlikely(error)) 280 kfree_skb(skb); 281 else 282 consume_skb(skb); 283 stats_counter = &stats->n_missed; 284 goto out; 285 } 286 287 ovs_flow_stats_update(flow, key->tp.flags, skb); 288 sf_acts = rcu_dereference(flow->sf_acts); 289 ovs_execute_actions(dp, skb, sf_acts, key); 290 291 stats_counter = &stats->n_hit; 292 293 out: 294 /* Update datapath statistics. */ 295 u64_stats_update_begin(&stats->syncp); 296 (*stats_counter)++; 297 stats->n_mask_hit += n_mask_hit; 298 u64_stats_update_end(&stats->syncp); 299 } 300 301 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, 302 const struct sw_flow_key *key, 303 const struct dp_upcall_info *upcall_info, 304 uint32_t cutlen) 305 { 306 struct dp_stats_percpu *stats; 307 int err; 308 309 if (upcall_info->portid == 0) { 310 err = -ENOTCONN; 311 goto err; 312 } 313 314 if (!skb_is_gso(skb)) 315 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen); 316 else 317 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen); 318 if (err) 319 goto err; 320 321 return 0; 322 323 err: 324 stats = this_cpu_ptr(dp->stats_percpu); 325 326 u64_stats_update_begin(&stats->syncp); 327 stats->n_lost++; 328 u64_stats_update_end(&stats->syncp); 329 330 return err; 331 } 332 333 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, 334 const struct sw_flow_key *key, 335 const struct dp_upcall_info *upcall_info, 336 uint32_t cutlen) 337 { 338 unsigned short gso_type = skb_shinfo(skb)->gso_type; 339 struct sw_flow_key later_key; 340 struct sk_buff *segs, *nskb; 341 int err; 342 343 BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET); 344 segs = __skb_gso_segment(skb, NETIF_F_SG, false); 345 if (IS_ERR(segs)) 346 return PTR_ERR(segs); 347 if (segs == NULL) 348 return -EINVAL; 349 350 if (gso_type & SKB_GSO_UDP) { 351 /* The initial flow key extracted by ovs_flow_key_extract() 352 * in this case is for a first fragment, so we need to 353 * properly mark later fragments. 354 */ 355 later_key = *key; 356 later_key.ip.frag = OVS_FRAG_TYPE_LATER; 357 } 358 359 /* Queue all of the segments. */ 360 skb = segs; 361 do { 362 if (gso_type & SKB_GSO_UDP && skb != segs) 363 key = &later_key; 364 365 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen); 366 if (err) 367 break; 368 369 } while ((skb = skb->next)); 370 371 /* Free all of the segments. */ 372 skb = segs; 373 do { 374 nskb = skb->next; 375 if (err) 376 kfree_skb(skb); 377 else 378 consume_skb(skb); 379 } while ((skb = nskb)); 380 return err; 381 } 382 383 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, 384 unsigned int hdrlen) 385 { 386 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) 387 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ 388 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */ 389 + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */ 390 391 /* OVS_PACKET_ATTR_USERDATA */ 392 if (upcall_info->userdata) 393 size += NLA_ALIGN(upcall_info->userdata->nla_len); 394 395 /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */ 396 if (upcall_info->egress_tun_info) 397 size += nla_total_size(ovs_tun_key_attr_size()); 398 399 /* OVS_PACKET_ATTR_ACTIONS */ 400 if (upcall_info->actions_len) 401 size += nla_total_size(upcall_info->actions_len); 402 403 /* OVS_PACKET_ATTR_MRU */ 404 if (upcall_info->mru) 405 size += nla_total_size(sizeof(upcall_info->mru)); 406 407 return size; 408 } 409 410 static void pad_packet(struct datapath *dp, struct sk_buff *skb) 411 { 412 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { 413 size_t plen = NLA_ALIGN(skb->len) - skb->len; 414 415 if (plen > 0) 416 memset(skb_put(skb, plen), 0, plen); 417 } 418 } 419 420 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, 421 const struct sw_flow_key *key, 422 const struct dp_upcall_info *upcall_info, 423 uint32_t cutlen) 424 { 425 struct ovs_header *upcall; 426 struct sk_buff *nskb = NULL; 427 struct sk_buff *user_skb = NULL; /* to be queued to userspace */ 428 struct nlattr *nla; 429 size_t len; 430 unsigned int hlen; 431 int err, dp_ifindex; 432 433 dp_ifindex = get_dpifindex(dp); 434 if (!dp_ifindex) 435 return -ENODEV; 436 437 if (skb_vlan_tag_present(skb)) { 438 nskb = skb_clone(skb, GFP_ATOMIC); 439 if (!nskb) 440 return -ENOMEM; 441 442 nskb = __vlan_hwaccel_push_inside(nskb); 443 if (!nskb) 444 return -ENOMEM; 445 446 skb = nskb; 447 } 448 449 if (nla_attr_size(skb->len) > USHRT_MAX) { 450 err = -EFBIG; 451 goto out; 452 } 453 454 /* Complete checksum if needed */ 455 if (skb->ip_summed == CHECKSUM_PARTIAL && 456 (err = skb_checksum_help(skb))) 457 goto out; 458 459 /* Older versions of OVS user space enforce alignment of the last 460 * Netlink attribute to NLA_ALIGNTO which would require extensive 461 * padding logic. Only perform zerocopy if padding is not required. 462 */ 463 if (dp->user_features & OVS_DP_F_UNALIGNED) 464 hlen = skb_zerocopy_headlen(skb); 465 else 466 hlen = skb->len; 467 468 len = upcall_msg_size(upcall_info, hlen - cutlen); 469 user_skb = genlmsg_new(len, GFP_ATOMIC); 470 if (!user_skb) { 471 err = -ENOMEM; 472 goto out; 473 } 474 475 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 476 0, upcall_info->cmd); 477 upcall->dp_ifindex = dp_ifindex; 478 479 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb); 480 BUG_ON(err); 481 482 if (upcall_info->userdata) 483 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA, 484 nla_len(upcall_info->userdata), 485 nla_data(upcall_info->userdata)); 486 487 if (upcall_info->egress_tun_info) { 488 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); 489 err = ovs_nla_put_tunnel_info(user_skb, 490 upcall_info->egress_tun_info); 491 BUG_ON(err); 492 nla_nest_end(user_skb, nla); 493 } 494 495 if (upcall_info->actions_len) { 496 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); 497 err = ovs_nla_put_actions(upcall_info->actions, 498 upcall_info->actions_len, 499 user_skb); 500 if (!err) 501 nla_nest_end(user_skb, nla); 502 else 503 nla_nest_cancel(user_skb, nla); 504 } 505 506 /* Add OVS_PACKET_ATTR_MRU */ 507 if (upcall_info->mru) { 508 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU, 509 upcall_info->mru)) { 510 err = -ENOBUFS; 511 goto out; 512 } 513 pad_packet(dp, user_skb); 514 } 515 516 /* Add OVS_PACKET_ATTR_LEN when packet is truncated */ 517 if (cutlen > 0) { 518 if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN, 519 skb->len)) { 520 err = -ENOBUFS; 521 goto out; 522 } 523 pad_packet(dp, user_skb); 524 } 525 526 /* Only reserve room for attribute header, packet data is added 527 * in skb_zerocopy() */ 528 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { 529 err = -ENOBUFS; 530 goto out; 531 } 532 nla->nla_len = nla_attr_size(skb->len - cutlen); 533 534 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen); 535 if (err) 536 goto out; 537 538 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ 539 pad_packet(dp, user_skb); 540 541 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; 542 543 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); 544 user_skb = NULL; 545 out: 546 if (err) 547 skb_tx_error(skb); 548 kfree_skb(user_skb); 549 kfree_skb(nskb); 550 return err; 551 } 552 553 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) 554 { 555 struct ovs_header *ovs_header = info->userhdr; 556 struct net *net = sock_net(skb->sk); 557 struct nlattr **a = info->attrs; 558 struct sw_flow_actions *acts; 559 struct sk_buff *packet; 560 struct sw_flow *flow; 561 struct sw_flow_actions *sf_acts; 562 struct datapath *dp; 563 struct vport *input_vport; 564 u16 mru = 0; 565 int len; 566 int err; 567 bool log = !a[OVS_PACKET_ATTR_PROBE]; 568 569 err = -EINVAL; 570 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || 571 !a[OVS_PACKET_ATTR_ACTIONS]) 572 goto err; 573 574 len = nla_len(a[OVS_PACKET_ATTR_PACKET]); 575 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL); 576 err = -ENOMEM; 577 if (!packet) 578 goto err; 579 skb_reserve(packet, NET_IP_ALIGN); 580 581 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len); 582 583 /* Set packet's mru */ 584 if (a[OVS_PACKET_ATTR_MRU]) { 585 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]); 586 packet->ignore_df = 1; 587 } 588 OVS_CB(packet)->mru = mru; 589 590 /* Build an sw_flow for sending this packet. */ 591 flow = ovs_flow_alloc(); 592 err = PTR_ERR(flow); 593 if (IS_ERR(flow)) 594 goto err_kfree_skb; 595 596 err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY], 597 packet, &flow->key, log); 598 if (err) 599 goto err_flow_free; 600 601 err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS], 602 &flow->key, &acts, log); 603 if (err) 604 goto err_flow_free; 605 606 rcu_assign_pointer(flow->sf_acts, acts); 607 packet->priority = flow->key.phy.priority; 608 packet->mark = flow->key.phy.skb_mark; 609 packet->protocol = flow->key.eth.type; 610 611 rcu_read_lock(); 612 dp = get_dp_rcu(net, ovs_header->dp_ifindex); 613 err = -ENODEV; 614 if (!dp) 615 goto err_unlock; 616 617 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port); 618 if (!input_vport) 619 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL); 620 621 if (!input_vport) 622 goto err_unlock; 623 624 packet->dev = input_vport->dev; 625 OVS_CB(packet)->input_vport = input_vport; 626 sf_acts = rcu_dereference(flow->sf_acts); 627 628 local_bh_disable(); 629 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key); 630 local_bh_enable(); 631 rcu_read_unlock(); 632 633 ovs_flow_free(flow, false); 634 return err; 635 636 err_unlock: 637 rcu_read_unlock(); 638 err_flow_free: 639 ovs_flow_free(flow, false); 640 err_kfree_skb: 641 kfree_skb(packet); 642 err: 643 return err; 644 } 645 646 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { 647 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, 648 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, 649 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, 650 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG }, 651 [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 }, 652 }; 653 654 static const struct genl_ops dp_packet_genl_ops[] = { 655 { .cmd = OVS_PACKET_CMD_EXECUTE, 656 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 657 .policy = packet_policy, 658 .doit = ovs_packet_cmd_execute 659 } 660 }; 661 662 static struct genl_family dp_packet_genl_family __ro_after_init = { 663 .hdrsize = sizeof(struct ovs_header), 664 .name = OVS_PACKET_FAMILY, 665 .version = OVS_PACKET_VERSION, 666 .maxattr = OVS_PACKET_ATTR_MAX, 667 .netnsok = true, 668 .parallel_ops = true, 669 .ops = dp_packet_genl_ops, 670 .n_ops = ARRAY_SIZE(dp_packet_genl_ops), 671 .module = THIS_MODULE, 672 }; 673 674 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, 675 struct ovs_dp_megaflow_stats *mega_stats) 676 { 677 int i; 678 679 memset(mega_stats, 0, sizeof(*mega_stats)); 680 681 stats->n_flows = ovs_flow_tbl_count(&dp->table); 682 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table); 683 684 stats->n_hit = stats->n_missed = stats->n_lost = 0; 685 686 for_each_possible_cpu(i) { 687 const struct dp_stats_percpu *percpu_stats; 688 struct dp_stats_percpu local_stats; 689 unsigned int start; 690 691 percpu_stats = per_cpu_ptr(dp->stats_percpu, i); 692 693 do { 694 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); 695 local_stats = *percpu_stats; 696 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); 697 698 stats->n_hit += local_stats.n_hit; 699 stats->n_missed += local_stats.n_missed; 700 stats->n_lost += local_stats.n_lost; 701 mega_stats->n_mask_hit += local_stats.n_mask_hit; 702 } 703 } 704 705 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags) 706 { 707 return ovs_identifier_is_ufid(sfid) && 708 !(ufid_flags & OVS_UFID_F_OMIT_KEY); 709 } 710 711 static bool should_fill_mask(uint32_t ufid_flags) 712 { 713 return !(ufid_flags & OVS_UFID_F_OMIT_MASK); 714 } 715 716 static bool should_fill_actions(uint32_t ufid_flags) 717 { 718 return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS); 719 } 720 721 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts, 722 const struct sw_flow_id *sfid, 723 uint32_t ufid_flags) 724 { 725 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header)); 726 727 /* OVS_FLOW_ATTR_UFID */ 728 if (sfid && ovs_identifier_is_ufid(sfid)) 729 len += nla_total_size(sfid->ufid_len); 730 731 /* OVS_FLOW_ATTR_KEY */ 732 if (!sfid || should_fill_key(sfid, ufid_flags)) 733 len += nla_total_size(ovs_key_attr_size()); 734 735 /* OVS_FLOW_ATTR_MASK */ 736 if (should_fill_mask(ufid_flags)) 737 len += nla_total_size(ovs_key_attr_size()); 738 739 /* OVS_FLOW_ATTR_ACTIONS */ 740 if (should_fill_actions(ufid_flags)) 741 len += nla_total_size(acts->orig_len); 742 743 return len 744 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ 745 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ 746 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */ 747 } 748 749 /* Called with ovs_mutex or RCU read lock. */ 750 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, 751 struct sk_buff *skb) 752 { 753 struct ovs_flow_stats stats; 754 __be16 tcp_flags; 755 unsigned long used; 756 757 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); 758 759 if (used && 760 nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used), 761 OVS_FLOW_ATTR_PAD)) 762 return -EMSGSIZE; 763 764 if (stats.n_packets && 765 nla_put_64bit(skb, OVS_FLOW_ATTR_STATS, 766 sizeof(struct ovs_flow_stats), &stats, 767 OVS_FLOW_ATTR_PAD)) 768 return -EMSGSIZE; 769 770 if ((u8)ntohs(tcp_flags) && 771 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags))) 772 return -EMSGSIZE; 773 774 return 0; 775 } 776 777 /* Called with ovs_mutex or RCU read lock. */ 778 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow, 779 struct sk_buff *skb, int skb_orig_len) 780 { 781 struct nlattr *start; 782 int err; 783 784 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if 785 * this is the first flow to be dumped into 'skb'. This is unusual for 786 * Netlink but individual action lists can be longer than 787 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this. 788 * The userspace caller can always fetch the actions separately if it 789 * really wants them. (Most userspace callers in fact don't care.) 790 * 791 * This can only fail for dump operations because the skb is always 792 * properly sized for single flows. 793 */ 794 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS); 795 if (start) { 796 const struct sw_flow_actions *sf_acts; 797 798 sf_acts = rcu_dereference_ovsl(flow->sf_acts); 799 err = ovs_nla_put_actions(sf_acts->actions, 800 sf_acts->actions_len, skb); 801 802 if (!err) 803 nla_nest_end(skb, start); 804 else { 805 if (skb_orig_len) 806 return err; 807 808 nla_nest_cancel(skb, start); 809 } 810 } else if (skb_orig_len) { 811 return -EMSGSIZE; 812 } 813 814 return 0; 815 } 816 817 /* Called with ovs_mutex or RCU read lock. */ 818 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, 819 struct sk_buff *skb, u32 portid, 820 u32 seq, u32 flags, u8 cmd, u32 ufid_flags) 821 { 822 const int skb_orig_len = skb->len; 823 struct ovs_header *ovs_header; 824 int err; 825 826 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, 827 flags, cmd); 828 if (!ovs_header) 829 return -EMSGSIZE; 830 831 ovs_header->dp_ifindex = dp_ifindex; 832 833 err = ovs_nla_put_identifier(flow, skb); 834 if (err) 835 goto error; 836 837 if (should_fill_key(&flow->id, ufid_flags)) { 838 err = ovs_nla_put_masked_key(flow, skb); 839 if (err) 840 goto error; 841 } 842 843 if (should_fill_mask(ufid_flags)) { 844 err = ovs_nla_put_mask(flow, skb); 845 if (err) 846 goto error; 847 } 848 849 err = ovs_flow_cmd_fill_stats(flow, skb); 850 if (err) 851 goto error; 852 853 if (should_fill_actions(ufid_flags)) { 854 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); 855 if (err) 856 goto error; 857 } 858 859 genlmsg_end(skb, ovs_header); 860 return 0; 861 862 error: 863 genlmsg_cancel(skb, ovs_header); 864 return err; 865 } 866 867 /* May not be called with RCU read lock. */ 868 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts, 869 const struct sw_flow_id *sfid, 870 struct genl_info *info, 871 bool always, 872 uint32_t ufid_flags) 873 { 874 struct sk_buff *skb; 875 size_t len; 876 877 if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0)) 878 return NULL; 879 880 len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags); 881 skb = genlmsg_new(len, GFP_KERNEL); 882 if (!skb) 883 return ERR_PTR(-ENOMEM); 884 885 return skb; 886 } 887 888 /* Called with ovs_mutex. */ 889 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow, 890 int dp_ifindex, 891 struct genl_info *info, u8 cmd, 892 bool always, u32 ufid_flags) 893 { 894 struct sk_buff *skb; 895 int retval; 896 897 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), 898 &flow->id, info, always, ufid_flags); 899 if (IS_ERR_OR_NULL(skb)) 900 return skb; 901 902 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb, 903 info->snd_portid, info->snd_seq, 0, 904 cmd, ufid_flags); 905 BUG_ON(retval < 0); 906 return skb; 907 } 908 909 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) 910 { 911 struct net *net = sock_net(skb->sk); 912 struct nlattr **a = info->attrs; 913 struct ovs_header *ovs_header = info->userhdr; 914 struct sw_flow *flow = NULL, *new_flow; 915 struct sw_flow_mask mask; 916 struct sk_buff *reply; 917 struct datapath *dp; 918 struct sw_flow_actions *acts; 919 struct sw_flow_match match; 920 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); 921 int error; 922 bool log = !a[OVS_FLOW_ATTR_PROBE]; 923 924 /* Must have key and actions. */ 925 error = -EINVAL; 926 if (!a[OVS_FLOW_ATTR_KEY]) { 927 OVS_NLERR(log, "Flow key attr not present in new flow."); 928 goto error; 929 } 930 if (!a[OVS_FLOW_ATTR_ACTIONS]) { 931 OVS_NLERR(log, "Flow actions attr not present in new flow."); 932 goto error; 933 } 934 935 /* Most of the time we need to allocate a new flow, do it before 936 * locking. 937 */ 938 new_flow = ovs_flow_alloc(); 939 if (IS_ERR(new_flow)) { 940 error = PTR_ERR(new_flow); 941 goto error; 942 } 943 944 /* Extract key. */ 945 ovs_match_init(&match, &new_flow->key, false, &mask); 946 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], 947 a[OVS_FLOW_ATTR_MASK], log); 948 if (error) 949 goto err_kfree_flow; 950 951 /* Extract flow identifier. */ 952 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], 953 &new_flow->key, log); 954 if (error) 955 goto err_kfree_flow; 956 957 /* unmasked key is needed to match when ufid is not used. */ 958 if (ovs_identifier_is_key(&new_flow->id)) 959 match.key = new_flow->id.unmasked_key; 960 961 ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask); 962 963 /* Validate actions. */ 964 error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS], 965 &new_flow->key, &acts, log); 966 if (error) { 967 OVS_NLERR(log, "Flow actions may not be safe on all matching packets."); 968 goto err_kfree_flow; 969 } 970 971 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false, 972 ufid_flags); 973 if (IS_ERR(reply)) { 974 error = PTR_ERR(reply); 975 goto err_kfree_acts; 976 } 977 978 ovs_lock(); 979 dp = get_dp(net, ovs_header->dp_ifindex); 980 if (unlikely(!dp)) { 981 error = -ENODEV; 982 goto err_unlock_ovs; 983 } 984 985 /* Check if this is a duplicate flow */ 986 if (ovs_identifier_is_ufid(&new_flow->id)) 987 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id); 988 if (!flow) 989 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key); 990 if (likely(!flow)) { 991 rcu_assign_pointer(new_flow->sf_acts, acts); 992 993 /* Put flow in bucket. */ 994 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask); 995 if (unlikely(error)) { 996 acts = NULL; 997 goto err_unlock_ovs; 998 } 999 1000 if (unlikely(reply)) { 1001 error = ovs_flow_cmd_fill_info(new_flow, 1002 ovs_header->dp_ifindex, 1003 reply, info->snd_portid, 1004 info->snd_seq, 0, 1005 OVS_FLOW_CMD_NEW, 1006 ufid_flags); 1007 BUG_ON(error < 0); 1008 } 1009 ovs_unlock(); 1010 } else { 1011 struct sw_flow_actions *old_acts; 1012 1013 /* Bail out if we're not allowed to modify an existing flow. 1014 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL 1015 * because Generic Netlink treats the latter as a dump 1016 * request. We also accept NLM_F_EXCL in case that bug ever 1017 * gets fixed. 1018 */ 1019 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE 1020 | NLM_F_EXCL))) { 1021 error = -EEXIST; 1022 goto err_unlock_ovs; 1023 } 1024 /* The flow identifier has to be the same for flow updates. 1025 * Look for any overlapping flow. 1026 */ 1027 if (unlikely(!ovs_flow_cmp(flow, &match))) { 1028 if (ovs_identifier_is_key(&flow->id)) 1029 flow = ovs_flow_tbl_lookup_exact(&dp->table, 1030 &match); 1031 else /* UFID matches but key is different */ 1032 flow = NULL; 1033 if (!flow) { 1034 error = -ENOENT; 1035 goto err_unlock_ovs; 1036 } 1037 } 1038 /* Update actions. */ 1039 old_acts = ovsl_dereference(flow->sf_acts); 1040 rcu_assign_pointer(flow->sf_acts, acts); 1041 1042 if (unlikely(reply)) { 1043 error = ovs_flow_cmd_fill_info(flow, 1044 ovs_header->dp_ifindex, 1045 reply, info->snd_portid, 1046 info->snd_seq, 0, 1047 OVS_FLOW_CMD_NEW, 1048 ufid_flags); 1049 BUG_ON(error < 0); 1050 } 1051 ovs_unlock(); 1052 1053 ovs_nla_free_flow_actions_rcu(old_acts); 1054 ovs_flow_free(new_flow, false); 1055 } 1056 1057 if (reply) 1058 ovs_notify(&dp_flow_genl_family, reply, info); 1059 return 0; 1060 1061 err_unlock_ovs: 1062 ovs_unlock(); 1063 kfree_skb(reply); 1064 err_kfree_acts: 1065 ovs_nla_free_flow_actions(acts); 1066 err_kfree_flow: 1067 ovs_flow_free(new_flow, false); 1068 error: 1069 return error; 1070 } 1071 1072 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */ 1073 static struct sw_flow_actions *get_flow_actions(struct net *net, 1074 const struct nlattr *a, 1075 const struct sw_flow_key *key, 1076 const struct sw_flow_mask *mask, 1077 bool log) 1078 { 1079 struct sw_flow_actions *acts; 1080 struct sw_flow_key masked_key; 1081 int error; 1082 1083 ovs_flow_mask_key(&masked_key, key, true, mask); 1084 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); 1085 if (error) { 1086 OVS_NLERR(log, 1087 "Actions may not be safe on all matching packets"); 1088 return ERR_PTR(error); 1089 } 1090 1091 return acts; 1092 } 1093 1094 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) 1095 { 1096 struct net *net = sock_net(skb->sk); 1097 struct nlattr **a = info->attrs; 1098 struct ovs_header *ovs_header = info->userhdr; 1099 struct sw_flow_key key; 1100 struct sw_flow *flow; 1101 struct sw_flow_mask mask; 1102 struct sk_buff *reply = NULL; 1103 struct datapath *dp; 1104 struct sw_flow_actions *old_acts = NULL, *acts = NULL; 1105 struct sw_flow_match match; 1106 struct sw_flow_id sfid; 1107 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); 1108 int error = 0; 1109 bool log = !a[OVS_FLOW_ATTR_PROBE]; 1110 bool ufid_present; 1111 1112 ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log); 1113 if (a[OVS_FLOW_ATTR_KEY]) { 1114 ovs_match_init(&match, &key, true, &mask); 1115 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], 1116 a[OVS_FLOW_ATTR_MASK], log); 1117 } else if (!ufid_present) { 1118 OVS_NLERR(log, 1119 "Flow set message rejected, Key attribute missing."); 1120 error = -EINVAL; 1121 } 1122 if (error) 1123 goto error; 1124 1125 /* Validate actions. */ 1126 if (a[OVS_FLOW_ATTR_ACTIONS]) { 1127 if (!a[OVS_FLOW_ATTR_KEY]) { 1128 OVS_NLERR(log, 1129 "Flow key attribute not present in set flow."); 1130 error = -EINVAL; 1131 goto error; 1132 } 1133 1134 acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key, 1135 &mask, log); 1136 if (IS_ERR(acts)) { 1137 error = PTR_ERR(acts); 1138 goto error; 1139 } 1140 1141 /* Can allocate before locking if have acts. */ 1142 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false, 1143 ufid_flags); 1144 if (IS_ERR(reply)) { 1145 error = PTR_ERR(reply); 1146 goto err_kfree_acts; 1147 } 1148 } 1149 1150 ovs_lock(); 1151 dp = get_dp(net, ovs_header->dp_ifindex); 1152 if (unlikely(!dp)) { 1153 error = -ENODEV; 1154 goto err_unlock_ovs; 1155 } 1156 /* Check that the flow exists. */ 1157 if (ufid_present) 1158 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid); 1159 else 1160 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); 1161 if (unlikely(!flow)) { 1162 error = -ENOENT; 1163 goto err_unlock_ovs; 1164 } 1165 1166 /* Update actions, if present. */ 1167 if (likely(acts)) { 1168 old_acts = ovsl_dereference(flow->sf_acts); 1169 rcu_assign_pointer(flow->sf_acts, acts); 1170 1171 if (unlikely(reply)) { 1172 error = ovs_flow_cmd_fill_info(flow, 1173 ovs_header->dp_ifindex, 1174 reply, info->snd_portid, 1175 info->snd_seq, 0, 1176 OVS_FLOW_CMD_NEW, 1177 ufid_flags); 1178 BUG_ON(error < 0); 1179 } 1180 } else { 1181 /* Could not alloc without acts before locking. */ 1182 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, 1183 info, OVS_FLOW_CMD_NEW, false, 1184 ufid_flags); 1185 1186 if (IS_ERR(reply)) { 1187 error = PTR_ERR(reply); 1188 goto err_unlock_ovs; 1189 } 1190 } 1191 1192 /* Clear stats. */ 1193 if (a[OVS_FLOW_ATTR_CLEAR]) 1194 ovs_flow_stats_clear(flow); 1195 ovs_unlock(); 1196 1197 if (reply) 1198 ovs_notify(&dp_flow_genl_family, reply, info); 1199 if (old_acts) 1200 ovs_nla_free_flow_actions_rcu(old_acts); 1201 1202 return 0; 1203 1204 err_unlock_ovs: 1205 ovs_unlock(); 1206 kfree_skb(reply); 1207 err_kfree_acts: 1208 ovs_nla_free_flow_actions(acts); 1209 error: 1210 return error; 1211 } 1212 1213 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) 1214 { 1215 struct nlattr **a = info->attrs; 1216 struct ovs_header *ovs_header = info->userhdr; 1217 struct net *net = sock_net(skb->sk); 1218 struct sw_flow_key key; 1219 struct sk_buff *reply; 1220 struct sw_flow *flow; 1221 struct datapath *dp; 1222 struct sw_flow_match match; 1223 struct sw_flow_id ufid; 1224 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); 1225 int err = 0; 1226 bool log = !a[OVS_FLOW_ATTR_PROBE]; 1227 bool ufid_present; 1228 1229 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log); 1230 if (a[OVS_FLOW_ATTR_KEY]) { 1231 ovs_match_init(&match, &key, true, NULL); 1232 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL, 1233 log); 1234 } else if (!ufid_present) { 1235 OVS_NLERR(log, 1236 "Flow get message rejected, Key attribute missing."); 1237 err = -EINVAL; 1238 } 1239 if (err) 1240 return err; 1241 1242 ovs_lock(); 1243 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 1244 if (!dp) { 1245 err = -ENODEV; 1246 goto unlock; 1247 } 1248 1249 if (ufid_present) 1250 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid); 1251 else 1252 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); 1253 if (!flow) { 1254 err = -ENOENT; 1255 goto unlock; 1256 } 1257 1258 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info, 1259 OVS_FLOW_CMD_NEW, true, ufid_flags); 1260 if (IS_ERR(reply)) { 1261 err = PTR_ERR(reply); 1262 goto unlock; 1263 } 1264 1265 ovs_unlock(); 1266 return genlmsg_reply(reply, info); 1267 unlock: 1268 ovs_unlock(); 1269 return err; 1270 } 1271 1272 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) 1273 { 1274 struct nlattr **a = info->attrs; 1275 struct ovs_header *ovs_header = info->userhdr; 1276 struct net *net = sock_net(skb->sk); 1277 struct sw_flow_key key; 1278 struct sk_buff *reply; 1279 struct sw_flow *flow = NULL; 1280 struct datapath *dp; 1281 struct sw_flow_match match; 1282 struct sw_flow_id ufid; 1283 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); 1284 int err; 1285 bool log = !a[OVS_FLOW_ATTR_PROBE]; 1286 bool ufid_present; 1287 1288 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log); 1289 if (a[OVS_FLOW_ATTR_KEY]) { 1290 ovs_match_init(&match, &key, true, NULL); 1291 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], 1292 NULL, log); 1293 if (unlikely(err)) 1294 return err; 1295 } 1296 1297 ovs_lock(); 1298 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 1299 if (unlikely(!dp)) { 1300 err = -ENODEV; 1301 goto unlock; 1302 } 1303 1304 if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) { 1305 err = ovs_flow_tbl_flush(&dp->table); 1306 goto unlock; 1307 } 1308 1309 if (ufid_present) 1310 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid); 1311 else 1312 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); 1313 if (unlikely(!flow)) { 1314 err = -ENOENT; 1315 goto unlock; 1316 } 1317 1318 ovs_flow_tbl_remove(&dp->table, flow); 1319 ovs_unlock(); 1320 1321 reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts, 1322 &flow->id, info, false, ufid_flags); 1323 if (likely(reply)) { 1324 if (likely(!IS_ERR(reply))) { 1325 rcu_read_lock(); /*To keep RCU checker happy. */ 1326 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, 1327 reply, info->snd_portid, 1328 info->snd_seq, 0, 1329 OVS_FLOW_CMD_DEL, 1330 ufid_flags); 1331 rcu_read_unlock(); 1332 BUG_ON(err < 0); 1333 1334 ovs_notify(&dp_flow_genl_family, reply, info); 1335 } else { 1336 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply)); 1337 } 1338 } 1339 1340 ovs_flow_free(flow, true); 1341 return 0; 1342 unlock: 1343 ovs_unlock(); 1344 return err; 1345 } 1346 1347 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) 1348 { 1349 struct nlattr *a[__OVS_FLOW_ATTR_MAX]; 1350 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); 1351 struct table_instance *ti; 1352 struct datapath *dp; 1353 u32 ufid_flags; 1354 int err; 1355 1356 err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a, 1357 OVS_FLOW_ATTR_MAX, flow_policy); 1358 if (err) 1359 return err; 1360 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); 1361 1362 rcu_read_lock(); 1363 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); 1364 if (!dp) { 1365 rcu_read_unlock(); 1366 return -ENODEV; 1367 } 1368 1369 ti = rcu_dereference(dp->table.ti); 1370 for (;;) { 1371 struct sw_flow *flow; 1372 u32 bucket, obj; 1373 1374 bucket = cb->args[0]; 1375 obj = cb->args[1]; 1376 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj); 1377 if (!flow) 1378 break; 1379 1380 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb, 1381 NETLINK_CB(cb->skb).portid, 1382 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1383 OVS_FLOW_CMD_NEW, ufid_flags) < 0) 1384 break; 1385 1386 cb->args[0] = bucket; 1387 cb->args[1] = obj; 1388 } 1389 rcu_read_unlock(); 1390 return skb->len; 1391 } 1392 1393 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { 1394 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, 1395 [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED }, 1396 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, 1397 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, 1398 [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG }, 1399 [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 }, 1400 [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 }, 1401 }; 1402 1403 static const struct genl_ops dp_flow_genl_ops[] = { 1404 { .cmd = OVS_FLOW_CMD_NEW, 1405 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1406 .policy = flow_policy, 1407 .doit = ovs_flow_cmd_new 1408 }, 1409 { .cmd = OVS_FLOW_CMD_DEL, 1410 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1411 .policy = flow_policy, 1412 .doit = ovs_flow_cmd_del 1413 }, 1414 { .cmd = OVS_FLOW_CMD_GET, 1415 .flags = 0, /* OK for unprivileged users. */ 1416 .policy = flow_policy, 1417 .doit = ovs_flow_cmd_get, 1418 .dumpit = ovs_flow_cmd_dump 1419 }, 1420 { .cmd = OVS_FLOW_CMD_SET, 1421 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1422 .policy = flow_policy, 1423 .doit = ovs_flow_cmd_set, 1424 }, 1425 }; 1426 1427 static struct genl_family dp_flow_genl_family __ro_after_init = { 1428 .hdrsize = sizeof(struct ovs_header), 1429 .name = OVS_FLOW_FAMILY, 1430 .version = OVS_FLOW_VERSION, 1431 .maxattr = OVS_FLOW_ATTR_MAX, 1432 .netnsok = true, 1433 .parallel_ops = true, 1434 .ops = dp_flow_genl_ops, 1435 .n_ops = ARRAY_SIZE(dp_flow_genl_ops), 1436 .mcgrps = &ovs_dp_flow_multicast_group, 1437 .n_mcgrps = 1, 1438 .module = THIS_MODULE, 1439 }; 1440 1441 static size_t ovs_dp_cmd_msg_size(void) 1442 { 1443 size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header)); 1444 1445 msgsize += nla_total_size(IFNAMSIZ); 1446 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats)); 1447 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats)); 1448 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ 1449 1450 return msgsize; 1451 } 1452 1453 /* Called with ovs_mutex. */ 1454 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, 1455 u32 portid, u32 seq, u32 flags, u8 cmd) 1456 { 1457 struct ovs_header *ovs_header; 1458 struct ovs_dp_stats dp_stats; 1459 struct ovs_dp_megaflow_stats dp_megaflow_stats; 1460 int err; 1461 1462 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family, 1463 flags, cmd); 1464 if (!ovs_header) 1465 goto error; 1466 1467 ovs_header->dp_ifindex = get_dpifindex(dp); 1468 1469 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); 1470 if (err) 1471 goto nla_put_failure; 1472 1473 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats); 1474 if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), 1475 &dp_stats, OVS_DP_ATTR_PAD)) 1476 goto nla_put_failure; 1477 1478 if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS, 1479 sizeof(struct ovs_dp_megaflow_stats), 1480 &dp_megaflow_stats, OVS_DP_ATTR_PAD)) 1481 goto nla_put_failure; 1482 1483 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features)) 1484 goto nla_put_failure; 1485 1486 genlmsg_end(skb, ovs_header); 1487 return 0; 1488 1489 nla_put_failure: 1490 genlmsg_cancel(skb, ovs_header); 1491 error: 1492 return -EMSGSIZE; 1493 } 1494 1495 static struct sk_buff *ovs_dp_cmd_alloc_info(void) 1496 { 1497 return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL); 1498 } 1499 1500 /* Called with rcu_read_lock or ovs_mutex. */ 1501 static struct datapath *lookup_datapath(struct net *net, 1502 const struct ovs_header *ovs_header, 1503 struct nlattr *a[OVS_DP_ATTR_MAX + 1]) 1504 { 1505 struct datapath *dp; 1506 1507 if (!a[OVS_DP_ATTR_NAME]) 1508 dp = get_dp(net, ovs_header->dp_ifindex); 1509 else { 1510 struct vport *vport; 1511 1512 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME])); 1513 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL; 1514 } 1515 return dp ? dp : ERR_PTR(-ENODEV); 1516 } 1517 1518 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info) 1519 { 1520 struct datapath *dp; 1521 1522 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1523 if (IS_ERR(dp)) 1524 return; 1525 1526 WARN(dp->user_features, "Dropping previously announced user features\n"); 1527 dp->user_features = 0; 1528 } 1529 1530 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[]) 1531 { 1532 if (a[OVS_DP_ATTR_USER_FEATURES]) 1533 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]); 1534 } 1535 1536 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) 1537 { 1538 struct nlattr **a = info->attrs; 1539 struct vport_parms parms; 1540 struct sk_buff *reply; 1541 struct datapath *dp; 1542 struct vport *vport; 1543 struct ovs_net *ovs_net; 1544 int err, i; 1545 1546 err = -EINVAL; 1547 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) 1548 goto err; 1549 1550 reply = ovs_dp_cmd_alloc_info(); 1551 if (!reply) 1552 return -ENOMEM; 1553 1554 err = -ENOMEM; 1555 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 1556 if (dp == NULL) 1557 goto err_free_reply; 1558 1559 ovs_dp_set_net(dp, sock_net(skb->sk)); 1560 1561 /* Allocate table. */ 1562 err = ovs_flow_tbl_init(&dp->table); 1563 if (err) 1564 goto err_free_dp; 1565 1566 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu); 1567 if (!dp->stats_percpu) { 1568 err = -ENOMEM; 1569 goto err_destroy_table; 1570 } 1571 1572 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), 1573 GFP_KERNEL); 1574 if (!dp->ports) { 1575 err = -ENOMEM; 1576 goto err_destroy_percpu; 1577 } 1578 1579 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) 1580 INIT_HLIST_HEAD(&dp->ports[i]); 1581 1582 /* Set up our datapath device. */ 1583 parms.name = nla_data(a[OVS_DP_ATTR_NAME]); 1584 parms.type = OVS_VPORT_TYPE_INTERNAL; 1585 parms.options = NULL; 1586 parms.dp = dp; 1587 parms.port_no = OVSP_LOCAL; 1588 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; 1589 1590 ovs_dp_change(dp, a); 1591 1592 /* So far only local changes have been made, now need the lock. */ 1593 ovs_lock(); 1594 1595 vport = new_vport(&parms); 1596 if (IS_ERR(vport)) { 1597 err = PTR_ERR(vport); 1598 if (err == -EBUSY) 1599 err = -EEXIST; 1600 1601 if (err == -EEXIST) { 1602 /* An outdated user space instance that does not understand 1603 * the concept of user_features has attempted to create a new 1604 * datapath and is likely to reuse it. Drop all user features. 1605 */ 1606 if (info->genlhdr->version < OVS_DP_VER_FEATURES) 1607 ovs_dp_reset_user_features(skb, info); 1608 } 1609 1610 goto err_destroy_ports_array; 1611 } 1612 1613 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, 1614 info->snd_seq, 0, OVS_DP_CMD_NEW); 1615 BUG_ON(err < 0); 1616 1617 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); 1618 list_add_tail_rcu(&dp->list_node, &ovs_net->dps); 1619 1620 ovs_unlock(); 1621 1622 ovs_notify(&dp_datapath_genl_family, reply, info); 1623 return 0; 1624 1625 err_destroy_ports_array: 1626 ovs_unlock(); 1627 kfree(dp->ports); 1628 err_destroy_percpu: 1629 free_percpu(dp->stats_percpu); 1630 err_destroy_table: 1631 ovs_flow_tbl_destroy(&dp->table); 1632 err_free_dp: 1633 kfree(dp); 1634 err_free_reply: 1635 kfree_skb(reply); 1636 err: 1637 return err; 1638 } 1639 1640 /* Called with ovs_mutex. */ 1641 static void __dp_destroy(struct datapath *dp) 1642 { 1643 int i; 1644 1645 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { 1646 struct vport *vport; 1647 struct hlist_node *n; 1648 1649 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) 1650 if (vport->port_no != OVSP_LOCAL) 1651 ovs_dp_detach_port(vport); 1652 } 1653 1654 list_del_rcu(&dp->list_node); 1655 1656 /* OVSP_LOCAL is datapath internal port. We need to make sure that 1657 * all ports in datapath are destroyed first before freeing datapath. 1658 */ 1659 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); 1660 1661 /* RCU destroy the flow table */ 1662 call_rcu(&dp->rcu, destroy_dp_rcu); 1663 } 1664 1665 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) 1666 { 1667 struct sk_buff *reply; 1668 struct datapath *dp; 1669 int err; 1670 1671 reply = ovs_dp_cmd_alloc_info(); 1672 if (!reply) 1673 return -ENOMEM; 1674 1675 ovs_lock(); 1676 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1677 err = PTR_ERR(dp); 1678 if (IS_ERR(dp)) 1679 goto err_unlock_free; 1680 1681 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, 1682 info->snd_seq, 0, OVS_DP_CMD_DEL); 1683 BUG_ON(err < 0); 1684 1685 __dp_destroy(dp); 1686 ovs_unlock(); 1687 1688 ovs_notify(&dp_datapath_genl_family, reply, info); 1689 1690 return 0; 1691 1692 err_unlock_free: 1693 ovs_unlock(); 1694 kfree_skb(reply); 1695 return err; 1696 } 1697 1698 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) 1699 { 1700 struct sk_buff *reply; 1701 struct datapath *dp; 1702 int err; 1703 1704 reply = ovs_dp_cmd_alloc_info(); 1705 if (!reply) 1706 return -ENOMEM; 1707 1708 ovs_lock(); 1709 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1710 err = PTR_ERR(dp); 1711 if (IS_ERR(dp)) 1712 goto err_unlock_free; 1713 1714 ovs_dp_change(dp, info->attrs); 1715 1716 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, 1717 info->snd_seq, 0, OVS_DP_CMD_NEW); 1718 BUG_ON(err < 0); 1719 1720 ovs_unlock(); 1721 ovs_notify(&dp_datapath_genl_family, reply, info); 1722 1723 return 0; 1724 1725 err_unlock_free: 1726 ovs_unlock(); 1727 kfree_skb(reply); 1728 return err; 1729 } 1730 1731 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) 1732 { 1733 struct sk_buff *reply; 1734 struct datapath *dp; 1735 int err; 1736 1737 reply = ovs_dp_cmd_alloc_info(); 1738 if (!reply) 1739 return -ENOMEM; 1740 1741 ovs_lock(); 1742 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1743 if (IS_ERR(dp)) { 1744 err = PTR_ERR(dp); 1745 goto err_unlock_free; 1746 } 1747 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, 1748 info->snd_seq, 0, OVS_DP_CMD_NEW); 1749 BUG_ON(err < 0); 1750 ovs_unlock(); 1751 1752 return genlmsg_reply(reply, info); 1753 1754 err_unlock_free: 1755 ovs_unlock(); 1756 kfree_skb(reply); 1757 return err; 1758 } 1759 1760 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) 1761 { 1762 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id); 1763 struct datapath *dp; 1764 int skip = cb->args[0]; 1765 int i = 0; 1766 1767 ovs_lock(); 1768 list_for_each_entry(dp, &ovs_net->dps, list_node) { 1769 if (i >= skip && 1770 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, 1771 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1772 OVS_DP_CMD_NEW) < 0) 1773 break; 1774 i++; 1775 } 1776 ovs_unlock(); 1777 1778 cb->args[0] = i; 1779 1780 return skb->len; 1781 } 1782 1783 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { 1784 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, 1785 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, 1786 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, 1787 }; 1788 1789 static const struct genl_ops dp_datapath_genl_ops[] = { 1790 { .cmd = OVS_DP_CMD_NEW, 1791 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1792 .policy = datapath_policy, 1793 .doit = ovs_dp_cmd_new 1794 }, 1795 { .cmd = OVS_DP_CMD_DEL, 1796 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1797 .policy = datapath_policy, 1798 .doit = ovs_dp_cmd_del 1799 }, 1800 { .cmd = OVS_DP_CMD_GET, 1801 .flags = 0, /* OK for unprivileged users. */ 1802 .policy = datapath_policy, 1803 .doit = ovs_dp_cmd_get, 1804 .dumpit = ovs_dp_cmd_dump 1805 }, 1806 { .cmd = OVS_DP_CMD_SET, 1807 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 1808 .policy = datapath_policy, 1809 .doit = ovs_dp_cmd_set, 1810 }, 1811 }; 1812 1813 static struct genl_family dp_datapath_genl_family __ro_after_init = { 1814 .hdrsize = sizeof(struct ovs_header), 1815 .name = OVS_DATAPATH_FAMILY, 1816 .version = OVS_DATAPATH_VERSION, 1817 .maxattr = OVS_DP_ATTR_MAX, 1818 .netnsok = true, 1819 .parallel_ops = true, 1820 .ops = dp_datapath_genl_ops, 1821 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops), 1822 .mcgrps = &ovs_dp_datapath_multicast_group, 1823 .n_mcgrps = 1, 1824 .module = THIS_MODULE, 1825 }; 1826 1827 /* Called with ovs_mutex or RCU read lock. */ 1828 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, 1829 u32 portid, u32 seq, u32 flags, u8 cmd) 1830 { 1831 struct ovs_header *ovs_header; 1832 struct ovs_vport_stats vport_stats; 1833 int err; 1834 1835 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family, 1836 flags, cmd); 1837 if (!ovs_header) 1838 return -EMSGSIZE; 1839 1840 ovs_header->dp_ifindex = get_dpifindex(vport->dp); 1841 1842 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) || 1843 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) || 1844 nla_put_string(skb, OVS_VPORT_ATTR_NAME, 1845 ovs_vport_name(vport))) 1846 goto nla_put_failure; 1847 1848 ovs_vport_get_stats(vport, &vport_stats); 1849 if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS, 1850 sizeof(struct ovs_vport_stats), &vport_stats, 1851 OVS_VPORT_ATTR_PAD)) 1852 goto nla_put_failure; 1853 1854 if (ovs_vport_get_upcall_portids(vport, skb)) 1855 goto nla_put_failure; 1856 1857 err = ovs_vport_get_options(vport, skb); 1858 if (err == -EMSGSIZE) 1859 goto error; 1860 1861 genlmsg_end(skb, ovs_header); 1862 return 0; 1863 1864 nla_put_failure: 1865 err = -EMSGSIZE; 1866 error: 1867 genlmsg_cancel(skb, ovs_header); 1868 return err; 1869 } 1870 1871 static struct sk_buff *ovs_vport_cmd_alloc_info(void) 1872 { 1873 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1874 } 1875 1876 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */ 1877 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, 1878 u32 seq, u8 cmd) 1879 { 1880 struct sk_buff *skb; 1881 int retval; 1882 1883 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1884 if (!skb) 1885 return ERR_PTR(-ENOMEM); 1886 1887 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); 1888 BUG_ON(retval < 0); 1889 1890 return skb; 1891 } 1892 1893 /* Called with ovs_mutex or RCU read lock. */ 1894 static struct vport *lookup_vport(struct net *net, 1895 const struct ovs_header *ovs_header, 1896 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) 1897 { 1898 struct datapath *dp; 1899 struct vport *vport; 1900 1901 if (a[OVS_VPORT_ATTR_NAME]) { 1902 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME])); 1903 if (!vport) 1904 return ERR_PTR(-ENODEV); 1905 if (ovs_header->dp_ifindex && 1906 ovs_header->dp_ifindex != get_dpifindex(vport->dp)) 1907 return ERR_PTR(-ENODEV); 1908 return vport; 1909 } else if (a[OVS_VPORT_ATTR_PORT_NO]) { 1910 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); 1911 1912 if (port_no >= DP_MAX_PORTS) 1913 return ERR_PTR(-EFBIG); 1914 1915 dp = get_dp(net, ovs_header->dp_ifindex); 1916 if (!dp) 1917 return ERR_PTR(-ENODEV); 1918 1919 vport = ovs_vport_ovsl_rcu(dp, port_no); 1920 if (!vport) 1921 return ERR_PTR(-ENODEV); 1922 return vport; 1923 } else 1924 return ERR_PTR(-EINVAL); 1925 } 1926 1927 /* Called with ovs_mutex */ 1928 static void update_headroom(struct datapath *dp) 1929 { 1930 unsigned dev_headroom, max_headroom = 0; 1931 struct net_device *dev; 1932 struct vport *vport; 1933 int i; 1934 1935 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { 1936 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) { 1937 dev = vport->dev; 1938 dev_headroom = netdev_get_fwd_headroom(dev); 1939 if (dev_headroom > max_headroom) 1940 max_headroom = dev_headroom; 1941 } 1942 } 1943 1944 dp->max_headroom = max_headroom; 1945 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) 1946 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) 1947 netdev_set_rx_headroom(vport->dev, max_headroom); 1948 } 1949 1950 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) 1951 { 1952 struct nlattr **a = info->attrs; 1953 struct ovs_header *ovs_header = info->userhdr; 1954 struct vport_parms parms; 1955 struct sk_buff *reply; 1956 struct vport *vport; 1957 struct datapath *dp; 1958 u32 port_no; 1959 int err; 1960 1961 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || 1962 !a[OVS_VPORT_ATTR_UPCALL_PID]) 1963 return -EINVAL; 1964 1965 port_no = a[OVS_VPORT_ATTR_PORT_NO] 1966 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0; 1967 if (port_no >= DP_MAX_PORTS) 1968 return -EFBIG; 1969 1970 reply = ovs_vport_cmd_alloc_info(); 1971 if (!reply) 1972 return -ENOMEM; 1973 1974 ovs_lock(); 1975 restart: 1976 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 1977 err = -ENODEV; 1978 if (!dp) 1979 goto exit_unlock_free; 1980 1981 if (port_no) { 1982 vport = ovs_vport_ovsl(dp, port_no); 1983 err = -EBUSY; 1984 if (vport) 1985 goto exit_unlock_free; 1986 } else { 1987 for (port_no = 1; ; port_no++) { 1988 if (port_no >= DP_MAX_PORTS) { 1989 err = -EFBIG; 1990 goto exit_unlock_free; 1991 } 1992 vport = ovs_vport_ovsl(dp, port_no); 1993 if (!vport) 1994 break; 1995 } 1996 } 1997 1998 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]); 1999 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); 2000 parms.options = a[OVS_VPORT_ATTR_OPTIONS]; 2001 parms.dp = dp; 2002 parms.port_no = port_no; 2003 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID]; 2004 2005 vport = new_vport(&parms); 2006 err = PTR_ERR(vport); 2007 if (IS_ERR(vport)) { 2008 if (err == -EAGAIN) 2009 goto restart; 2010 goto exit_unlock_free; 2011 } 2012 2013 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, 2014 info->snd_seq, 0, OVS_VPORT_CMD_NEW); 2015 2016 if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom) 2017 update_headroom(dp); 2018 else 2019 netdev_set_rx_headroom(vport->dev, dp->max_headroom); 2020 2021 BUG_ON(err < 0); 2022 ovs_unlock(); 2023 2024 ovs_notify(&dp_vport_genl_family, reply, info); 2025 return 0; 2026 2027 exit_unlock_free: 2028 ovs_unlock(); 2029 kfree_skb(reply); 2030 return err; 2031 } 2032 2033 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) 2034 { 2035 struct nlattr **a = info->attrs; 2036 struct sk_buff *reply; 2037 struct vport *vport; 2038 int err; 2039 2040 reply = ovs_vport_cmd_alloc_info(); 2041 if (!reply) 2042 return -ENOMEM; 2043 2044 ovs_lock(); 2045 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); 2046 err = PTR_ERR(vport); 2047 if (IS_ERR(vport)) 2048 goto exit_unlock_free; 2049 2050 if (a[OVS_VPORT_ATTR_TYPE] && 2051 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) { 2052 err = -EINVAL; 2053 goto exit_unlock_free; 2054 } 2055 2056 if (a[OVS_VPORT_ATTR_OPTIONS]) { 2057 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); 2058 if (err) 2059 goto exit_unlock_free; 2060 } 2061 2062 2063 if (a[OVS_VPORT_ATTR_UPCALL_PID]) { 2064 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID]; 2065 2066 err = ovs_vport_set_upcall_portids(vport, ids); 2067 if (err) 2068 goto exit_unlock_free; 2069 } 2070 2071 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, 2072 info->snd_seq, 0, OVS_VPORT_CMD_NEW); 2073 BUG_ON(err < 0); 2074 2075 ovs_unlock(); 2076 ovs_notify(&dp_vport_genl_family, reply, info); 2077 return 0; 2078 2079 exit_unlock_free: 2080 ovs_unlock(); 2081 kfree_skb(reply); 2082 return err; 2083 } 2084 2085 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) 2086 { 2087 bool must_update_headroom = false; 2088 struct nlattr **a = info->attrs; 2089 struct sk_buff *reply; 2090 struct datapath *dp; 2091 struct vport *vport; 2092 int err; 2093 2094 reply = ovs_vport_cmd_alloc_info(); 2095 if (!reply) 2096 return -ENOMEM; 2097 2098 ovs_lock(); 2099 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); 2100 err = PTR_ERR(vport); 2101 if (IS_ERR(vport)) 2102 goto exit_unlock_free; 2103 2104 if (vport->port_no == OVSP_LOCAL) { 2105 err = -EINVAL; 2106 goto exit_unlock_free; 2107 } 2108 2109 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, 2110 info->snd_seq, 0, OVS_VPORT_CMD_DEL); 2111 BUG_ON(err < 0); 2112 2113 /* the vport deletion may trigger dp headroom update */ 2114 dp = vport->dp; 2115 if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom) 2116 must_update_headroom = true; 2117 netdev_reset_rx_headroom(vport->dev); 2118 ovs_dp_detach_port(vport); 2119 2120 if (must_update_headroom) 2121 update_headroom(dp); 2122 ovs_unlock(); 2123 2124 ovs_notify(&dp_vport_genl_family, reply, info); 2125 return 0; 2126 2127 exit_unlock_free: 2128 ovs_unlock(); 2129 kfree_skb(reply); 2130 return err; 2131 } 2132 2133 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) 2134 { 2135 struct nlattr **a = info->attrs; 2136 struct ovs_header *ovs_header = info->userhdr; 2137 struct sk_buff *reply; 2138 struct vport *vport; 2139 int err; 2140 2141 reply = ovs_vport_cmd_alloc_info(); 2142 if (!reply) 2143 return -ENOMEM; 2144 2145 rcu_read_lock(); 2146 vport = lookup_vport(sock_net(skb->sk), ovs_header, a); 2147 err = PTR_ERR(vport); 2148 if (IS_ERR(vport)) 2149 goto exit_unlock_free; 2150 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, 2151 info->snd_seq, 0, OVS_VPORT_CMD_NEW); 2152 BUG_ON(err < 0); 2153 rcu_read_unlock(); 2154 2155 return genlmsg_reply(reply, info); 2156 2157 exit_unlock_free: 2158 rcu_read_unlock(); 2159 kfree_skb(reply); 2160 return err; 2161 } 2162 2163 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) 2164 { 2165 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); 2166 struct datapath *dp; 2167 int bucket = cb->args[0], skip = cb->args[1]; 2168 int i, j = 0; 2169 2170 rcu_read_lock(); 2171 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); 2172 if (!dp) { 2173 rcu_read_unlock(); 2174 return -ENODEV; 2175 } 2176 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { 2177 struct vport *vport; 2178 2179 j = 0; 2180 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) { 2181 if (j >= skip && 2182 ovs_vport_cmd_fill_info(vport, skb, 2183 NETLINK_CB(cb->skb).portid, 2184 cb->nlh->nlmsg_seq, 2185 NLM_F_MULTI, 2186 OVS_VPORT_CMD_NEW) < 0) 2187 goto out; 2188 2189 j++; 2190 } 2191 skip = 0; 2192 } 2193 out: 2194 rcu_read_unlock(); 2195 2196 cb->args[0] = i; 2197 cb->args[1] = j; 2198 2199 return skb->len; 2200 } 2201 2202 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { 2203 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, 2204 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, 2205 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, 2206 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, 2207 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, 2208 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, 2209 }; 2210 2211 static const struct genl_ops dp_vport_genl_ops[] = { 2212 { .cmd = OVS_VPORT_CMD_NEW, 2213 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 2214 .policy = vport_policy, 2215 .doit = ovs_vport_cmd_new 2216 }, 2217 { .cmd = OVS_VPORT_CMD_DEL, 2218 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 2219 .policy = vport_policy, 2220 .doit = ovs_vport_cmd_del 2221 }, 2222 { .cmd = OVS_VPORT_CMD_GET, 2223 .flags = 0, /* OK for unprivileged users. */ 2224 .policy = vport_policy, 2225 .doit = ovs_vport_cmd_get, 2226 .dumpit = ovs_vport_cmd_dump 2227 }, 2228 { .cmd = OVS_VPORT_CMD_SET, 2229 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ 2230 .policy = vport_policy, 2231 .doit = ovs_vport_cmd_set, 2232 }, 2233 }; 2234 2235 struct genl_family dp_vport_genl_family __ro_after_init = { 2236 .hdrsize = sizeof(struct ovs_header), 2237 .name = OVS_VPORT_FAMILY, 2238 .version = OVS_VPORT_VERSION, 2239 .maxattr = OVS_VPORT_ATTR_MAX, 2240 .netnsok = true, 2241 .parallel_ops = true, 2242 .ops = dp_vport_genl_ops, 2243 .n_ops = ARRAY_SIZE(dp_vport_genl_ops), 2244 .mcgrps = &ovs_dp_vport_multicast_group, 2245 .n_mcgrps = 1, 2246 .module = THIS_MODULE, 2247 }; 2248 2249 static struct genl_family * const dp_genl_families[] = { 2250 &dp_datapath_genl_family, 2251 &dp_vport_genl_family, 2252 &dp_flow_genl_family, 2253 &dp_packet_genl_family, 2254 }; 2255 2256 static void dp_unregister_genl(int n_families) 2257 { 2258 int i; 2259 2260 for (i = 0; i < n_families; i++) 2261 genl_unregister_family(dp_genl_families[i]); 2262 } 2263 2264 static int __init dp_register_genl(void) 2265 { 2266 int err; 2267 int i; 2268 2269 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { 2270 2271 err = genl_register_family(dp_genl_families[i]); 2272 if (err) 2273 goto error; 2274 } 2275 2276 return 0; 2277 2278 error: 2279 dp_unregister_genl(i); 2280 return err; 2281 } 2282 2283 static int __net_init ovs_init_net(struct net *net) 2284 { 2285 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 2286 2287 INIT_LIST_HEAD(&ovs_net->dps); 2288 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq); 2289 ovs_ct_init(net); 2290 return 0; 2291 } 2292 2293 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet, 2294 struct list_head *head) 2295 { 2296 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 2297 struct datapath *dp; 2298 2299 list_for_each_entry(dp, &ovs_net->dps, list_node) { 2300 int i; 2301 2302 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { 2303 struct vport *vport; 2304 2305 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) { 2306 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL) 2307 continue; 2308 2309 if (dev_net(vport->dev) == dnet) 2310 list_add(&vport->detach_list, head); 2311 } 2312 } 2313 } 2314 } 2315 2316 static void __net_exit ovs_exit_net(struct net *dnet) 2317 { 2318 struct datapath *dp, *dp_next; 2319 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id); 2320 struct vport *vport, *vport_next; 2321 struct net *net; 2322 LIST_HEAD(head); 2323 2324 ovs_ct_exit(dnet); 2325 ovs_lock(); 2326 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) 2327 __dp_destroy(dp); 2328 2329 rtnl_lock(); 2330 for_each_net(net) 2331 list_vports_from_net(net, dnet, &head); 2332 rtnl_unlock(); 2333 2334 /* Detach all vports from given namespace. */ 2335 list_for_each_entry_safe(vport, vport_next, &head, detach_list) { 2336 list_del(&vport->detach_list); 2337 ovs_dp_detach_port(vport); 2338 } 2339 2340 ovs_unlock(); 2341 2342 cancel_work_sync(&ovs_net->dp_notify_work); 2343 } 2344 2345 static struct pernet_operations ovs_net_ops = { 2346 .init = ovs_init_net, 2347 .exit = ovs_exit_net, 2348 .id = &ovs_net_id, 2349 .size = sizeof(struct ovs_net), 2350 }; 2351 2352 static int __init dp_init(void) 2353 { 2354 int err; 2355 2356 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); 2357 2358 pr_info("Open vSwitch switching datapath\n"); 2359 2360 err = action_fifos_init(); 2361 if (err) 2362 goto error; 2363 2364 err = ovs_internal_dev_rtnl_link_register(); 2365 if (err) 2366 goto error_action_fifos_exit; 2367 2368 err = ovs_flow_init(); 2369 if (err) 2370 goto error_unreg_rtnl_link; 2371 2372 err = ovs_vport_init(); 2373 if (err) 2374 goto error_flow_exit; 2375 2376 err = register_pernet_device(&ovs_net_ops); 2377 if (err) 2378 goto error_vport_exit; 2379 2380 err = register_netdevice_notifier(&ovs_dp_device_notifier); 2381 if (err) 2382 goto error_netns_exit; 2383 2384 err = ovs_netdev_init(); 2385 if (err) 2386 goto error_unreg_notifier; 2387 2388 err = dp_register_genl(); 2389 if (err < 0) 2390 goto error_unreg_netdev; 2391 2392 return 0; 2393 2394 error_unreg_netdev: 2395 ovs_netdev_exit(); 2396 error_unreg_notifier: 2397 unregister_netdevice_notifier(&ovs_dp_device_notifier); 2398 error_netns_exit: 2399 unregister_pernet_device(&ovs_net_ops); 2400 error_vport_exit: 2401 ovs_vport_exit(); 2402 error_flow_exit: 2403 ovs_flow_exit(); 2404 error_unreg_rtnl_link: 2405 ovs_internal_dev_rtnl_link_unregister(); 2406 error_action_fifos_exit: 2407 action_fifos_exit(); 2408 error: 2409 return err; 2410 } 2411 2412 static void dp_cleanup(void) 2413 { 2414 dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); 2415 ovs_netdev_exit(); 2416 unregister_netdevice_notifier(&ovs_dp_device_notifier); 2417 unregister_pernet_device(&ovs_net_ops); 2418 rcu_barrier(); 2419 ovs_vport_exit(); 2420 ovs_flow_exit(); 2421 ovs_internal_dev_rtnl_link_unregister(); 2422 action_fifos_exit(); 2423 } 2424 2425 module_init(dp_init); 2426 module_exit(dp_cleanup); 2427 2428 MODULE_DESCRIPTION("Open vSwitch switching datapath"); 2429 MODULE_LICENSE("GPL"); 2430 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY); 2431 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY); 2432 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY); 2433 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY); 2434