// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_inc_tx_stats - increment Tx statistics by one packet
 * @repr: repr to increment stats on
 * @len: length of the packet
 * @xmit_status: value returned by the xmit function
 */
void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
			   int xmit_status)
{
	struct ice_repr_pcpu_stats *stats;

	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
		     xmit_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_inc_rx_stats - increment Rx statistics by one packet
 * @netdev: repr netdev to increment stats on
 * @len: length of the packet
 */
void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link status change.
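 * The link state is forced, so the VF sees link-up whenever its
 * representor is administratively up.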
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is deactivated by the system. The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct ice_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return 0;
}

static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
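	/* np serves as both cb_ident and cb_priv for the flow block
	 * callback registered below
	 */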

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
	devl_lock(devlink_port->devlink);
	devl_rate_leaf_destroy(devlink_port);
	devl_unlock(devlink_port->devlink);
}

/**
 * ice_repr_rem - free the memory used by a representor
 * @repr: pointer to representor structure
 */
static void ice_repr_rem(struct ice_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
	kfree(repr);
}

/**
 * ice_repr_rem_vf - remove representor from VF
 * @repr: pointer to representor structure
 */
void ice_repr_rem_vf(struct ice_repr *repr)
{
	ice_repr_remove_node(&repr->vf->devlink_port);
	unregister_netdev(repr->netdev);
	ice_devlink_destroy_vf_port(repr->vf);
	ice_virtchnl_set_dflt_ops(repr->vf);
	ice_repr_rem(repr);
}

static void ice_repr_set_tx_topology(struct ice_pf *pf)
{
	struct devlink *devlink;

	/* only export if ADQ and DCB are disabled and the eswitch is enabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
	    !ice_is_switchdev_running(pf))
		return;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}

/**
 * ice_repr_add - add representor for generic VSI
 * @pf: pointer to PF structure
 * @src_vsi: pointer to VSI structure of device to represent
 * @parent_mac: device MAC address
 */
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	int err;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return ERR_PTR(-ENOMEM);

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
	if (!repr->stats) {
		err = -ENOMEM;
		goto err_stats;
	}

	repr->src_vsi = src_vsi;
	repr->id = src_vsi->vsi_num;
	np = netdev_priv(repr->netdev);
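	/* link the netdev private area back to the representor so
	 * ice_netdev_to_repr() can resolve it from the netdev
	 */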
	np->repr = repr;

	ether_addr_copy(repr->parent_mac, parent_mac);

	return repr;

err_stats:
	free_netdev(repr->netdev);
err_alloc:
	kfree(repr);
	return ERR_PTR(err);
}

/**
 * ice_repr_add_vf - add port representor for a VF
 * @vf: VF to create the port representor for
 *
 * Set up the representor netdev, devlink port and virtchnl ops for @vf.
 * Return: the new representor on success, ERR_PTR on failure.
 */
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return ERR_PTR(-ENOENT);

	err = ice_devlink_create_vf_port(vf);
	if (err)
		return ERR_PTR(err);

	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_repr_add;
	}

	repr->vf = vf;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);
	ice_repr_set_tx_topology(vf->pf);

	return repr;

err_netdev:
	ice_repr_rem(repr);
err_repr_add:
	ice_devlink_destroy_vf_port(vf);
	return ERR_PTR(err);
}

/**
 * ice_repr_get_by_vsi - look up the port representor for a VSI
 * @vsi: VSI to find the representor for
 *
 * Return: the representor associated with @vsi, or NULL if there is none.
 */
struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
	if (!vsi->vf)
		return NULL;

	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}