// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_get_sw_port_id - get port ID associated with representor
 * @repr: pointer to port representor
 */
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
	return repr->src_vsi->back->hw.port_info->lport;
}

/**
 * ice_repr_get_phys_port_name - get phys port name
 * @netdev: pointer to port representor netdev
 * @buf: write here port name
 * @len: max length of buf
 */
static int
ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	int res;

	/* Devlink port is registered and devlink core is taking care of name formatting. */
	if (repr->vf->devlink_port.devlink)
		return -EOPNOTSUPP;

	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
		       repr->id);
	if (res <= 0)
		return -EOPNOTSUPP;
	return 0;
}

/**
 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
			   int xmit_status)
{
	struct ice_repr_pcpu_stats *stats;

	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
		     xmit_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct ice_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return 0;
}
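
/**
 * ice_repr_ndo_has_offload_stats - check if offload stats type is supported
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested offload statistics
 *
 * Port representors support only the CPU hit (slow path) statistics type,
 * which is served by ice_repr_sp_stats64().
 */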
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
	devl_lock(devlink_port->devlink);
	devl_rate_leaf_destroy(devlink_port);
	devl_unlock(devlink_port->devlink);
}

/**
 * ice_repr_rem - free port representor resources
 * @repr: pointer to representor structure
 */
static void ice_repr_rem(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}

/**
 * ice_repr_rem_vf - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_repr_remove_node(&repr->vf->devlink_port);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
	ice_repr_rem(repr);
}
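
/**
 * ice_repr_set_tx_topology - export Tx scheduling topology via devlink-rate
 * @pf: pointer to PF structure
 *
 * The Tx topology is exported only when ADQ and DCB are disabled and the
 * eswitch is running in switchdev mode.
 */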
static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
	struct devlink *devlink;

	/* only export if ADQ and DCB disabled and eswitch enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_add - add representor for generic VSI
 * @pf: pointer to PF structure
 * @src_vsi: pointer to VSI structure of device to represent
 * @parent_mac: device MAC address
 */
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	ether_addr_copy(repr->parent_mac, parent_mac);

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}
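
/**
 * ice_repr_add_vf - add port representor for VF
 * @vf: pointer to VF structure
 *
 * Create a devlink port for the VF, allocate and register a representor
 * netdev for the VF's VSI, and switch the VF to the representor virtchnl
 * ops.
 *
 * Return: pointer to the created representor or an ERR_PTR() on failure.
 */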
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return ERR_PTR(-ENOENT);

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return ERR_PTR(err);

	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_repr_add;
	}

	repr->vf = vf;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);
	ice_repr_set_tx_topology(vf->pf);

	return repr;

err_netdev:
	ice_repr_rem(repr);
err_repr_add:
	ice_devlink_destroy_vf_port(vf);
	return ERR_PTR(err);
}

/**
 * ice_repr_get_by_vsi - look up port representor associated with a VSI
 * @vsi: pointer to VSI structure
 *
 * Return: pointer to the VSI's port representor, or NULL if the VSI does
 * not belong to a VF or has no representor registered.
 */
struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
	if (!vsi->vf)
		return NULL;

	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}