/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
#include <net/page_pool/helpers.h>

#define DRV_NAME		"enic"
#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"

#define ENIC_BARS_MAX		6

#define ENIC_WQ_MAX		256
#define ENIC_RQ_MAX		256

#define ENIC_WQ_NAPI_BUDGET	256

#define ENIC_AIC_LARGE_PKT_DIFF	3

struct enic_msix_entry {
	int requested;
	char devname[IFNAMSIZ + 8];
	irqreturn_t (*isr)(int, void *);
	void *devid;
	cpumask_var_t affinity_mask;
};

/* Store only the lower range. Higher range is given by fw. */
struct enic_intr_mod_range {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
};

struct enic_intr_mod_table {
	u32 rx_rate;
	u32 range_percent;
};

#define ENIC_MAX_LINK_SPEEDS		3
#define ENIC_LINK_SPEED_10G		10000
#define ENIC_LINK_SPEED_4G		4000
#define ENIC_LINK_40G_INDEX		2
#define ENIC_LINK_10G_INDEX		1
#define ENIC_LINK_4G_INDEX		0
#define ENIC_RX_COALESCE_RANGE_END	125
#define ENIC_AIC_TS_BREAK		100

struct enic_rx_coal {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
	u32 range_end;
	u32 use_adaptive_rx_coalesce;
};

/* priv_flags */
#define ENIC_SRIOV_ENABLED		(1 << 0)

/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED	(1 << 0)
#define ENIC_SET_REQUEST		(1 << 1)
#define ENIC_SET_NAME			(1 << 2)
#define ENIC_SET_INSTANCE		(1 << 3)
#define ENIC_SET_HOST			(1 << 4)

struct enic_port_profile {
	u32 set;
	u8 request;
	char name[PORT_PROFILE_MAX];
	u8 instance_uuid[PORT_UUID_MAX];
	u8 host_uuid[PORT_UUID_MAX];
	u8 vf_mac[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
};

/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adaptor
 * @rq_id: desired rq index
 * @node: hlist_node
 */
struct enic_rfs_fltr_node {
	struct flow_keys keys;
	u32 flow_id;
	u16 fltr_id;
	u16 rq_id;
	struct hlist_node node;
};

/* enic_rfs_flw_tbl - rfs flow table
 * @max: Maximum number of filters vNIC supports
 * @free: Number of free filters available
 * @toclean: hash table index to clean next
 * @ht_head: hash table list head
 * @lock: spin lock
 * @rfs_may_expire: timer function for enic_rps_may_expire_flow
 */
struct enic_rfs_flw_tbl {
	u16 max;
	int free;

#define ENIC_RFS_FLW_BITSHIFT	(10)
#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
	u16 toclean:ENIC_RFS_FLW_BITSHIFT;
	struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
	spinlock_t lock;
	struct timer_list rfs_may_expire;
};
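/* Illustrative sketch, not part of the driver: a filter's bucket is selected
 * by masking a flow hash with ENIC_RFS_FLW_MASK, so a lookup only walks one
 * of the 1 << ENIC_RFS_FLW_BITSHIFT buckets. 'hash' and 'id' below are
 * hypothetical locals:
 *
 *	struct enic_rfs_fltr_node *n;
 *
 *	hlist_for_each_entry(n, &enic->rfs_h.ht_head[hash & ENIC_RFS_FLW_MASK],
 *			     node)
 *		if (n->flow_id == id)
 *			return n;
 */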
struct vxlan_offload {
	u16 vxlan_udp_port_number;
	u8 patch_level;
	u8 flags;
};

struct enic_wq_stats {
	u64 packets;		/* pkts queued for Tx */
	u64 stopped;		/* Tx ring almost full, queue stopped */
	u64 wake;		/* Tx ring no longer full, queue woken up */
	u64 tso;		/* non-encap tso pkt */
	u64 encap_tso;		/* encap tso pkt */
	u64 encap_csum;		/* encap HW csum */
	u64 csum_partial;	/* skb->ip_summed = CHECKSUM_PARTIAL */
	u64 csum_none;		/* HW csum not required */
	u64 bytes;		/* bytes queued for Tx */
	u64 add_vlan;		/* HW adds vlan tag */
	u64 cq_work;		/* Tx completions processed */
	u64 cq_bytes;		/* Tx bytes processed */
	u64 null_pkt;		/* skb length <= 0 */
	u64 skb_linear_fail;	/* linearize failures */
	u64 desc_full_awake;	/* Tx ring full while queue awake */
};

struct enic_rq_stats {
	u64 packets;			/* pkts received */
	u64 bytes;			/* bytes received */
	u64 l4_rss_hash;		/* hashed on l4 */
	u64 l3_rss_hash;		/* hashed on l3 */
	u64 csum_unnecessary;		/* HW verified csum */
	u64 csum_unnecessary_encap;	/* HW verified csum on encap packet */
	u64 vlan_stripped;		/* HW stripped vlan */
	u64 napi_complete;		/* napi complete intr reenabled */
	u64 napi_repoll;		/* napi poll again */
	u64 bad_fcs;			/* bad pkts */
	u64 pkt_truncated;		/* truncated pkts */
	u64 no_skb;			/* out of skbs */
	u64 desc_skip;			/* Rx pkt went into later buffer */
	u64 pp_alloc_fail;		/* page pool alloc failure */
};

struct enic_wq {
	spinlock_t lock;		/* spinlock for wq */
	struct vnic_wq vwq;
	struct enic_wq_stats stats;
} ____cacheline_aligned;

struct enic_rq {
	struct vnic_rq vrq;
	struct enic_rq_stats stats;
	struct page_pool *pool;
} ____cacheline_aligned;

/* Per-instance private data structure */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar[ENIC_BARS_MAX];
	struct vnic_dev *vdev;
	struct timer_list notify_timer;
	struct work_struct reset;
	struct work_struct tx_hang_reset;
	struct work_struct change_mtu_work;
	struct msix_entry *msix_entry;
	struct enic_msix_entry *msix;
	u32 msg_enable;
	spinlock_t devcmd_lock;
	u8 mac_addr[ETH_ALEN];
	unsigned int flags;
	unsigned int priv_flags;
	unsigned int mc_count;
	unsigned int uc_count;
	u32 port_mtu;
	struct enic_rx_coal rx_coalesce_setting;
	u32 rx_coalesce_usecs;
	u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
	u16 num_vfs;
#endif
	spinlock_t enic_api_lock;
	bool enic_api_busy;
	struct enic_port_profile *pp;

	struct enic_wq *wq;
	unsigned int wq_avail;
	unsigned int wq_count;
	u16 loop_enable;
	u16 loop_tag;

	struct enic_rq *rq;
	unsigned int rq_avail;
	unsigned int rq_count;
	struct vxlan_offload vxlan;
	struct napi_struct *napi;

	struct vnic_intr *intr;
	unsigned int intr_avail;
	unsigned int intr_count;
	u32 __iomem *legacy_pba;	/* memory-mapped */

	struct vnic_cq *cq;
	unsigned int cq_avail;
	unsigned int cq_count;
	struct enic_rfs_flw_tbl rfs_h;
	u8 rss_key[ENIC_RSS_LEN];
	struct vnic_gen_stats gen_stats;
};

static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
	struct enic *enic = vdev->priv;

	return enic->netdev;
}

/* wrapper functions for kernel logging */
#define vdev_err(vdev, fmt, ...)	\
	dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_warn(vdev, fmt, ...)	\
	dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_info(vdev, fmt, ...)	\
	dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)

#define vdev_neterr(vdev, fmt, ...)	\
	netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netwarn(vdev, fmt, ...)	\
	netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netinfo(vdev, fmt, ...)	\
	netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)

static inline struct device *enic_get_dev(struct enic *enic)
{
	return &(enic->pdev->dev);
}

/* CQ index layout: one CQ per RQ first, then one per WQ */
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic,
					     unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic,
					     unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

/* MSIX interrupts are organized as the error interrupt, then the notify
 * interrupt, followed by all the I/O interrupts. The error interrupt needs
 * to fit in 7 bits due to hardware constraints.
 */
#define ENIC_MSIX_RESERVED_INTR	2
#define ENIC_MSIX_ERR_INTR	0
#define ENIC_MSIX_NOTIFY_INTR	1
#define ENIC_MSIX_IO_INTR_BASE	ENIC_MSIX_RESERVED_INTR
#define ENIC_MSIX_MIN_INTR	(ENIC_MSIX_RESERVED_INTR + 2)

#define ENIC_LEGACY_IO_INTR	0
#define ENIC_LEGACY_ERR_INTR	1
#define ENIC_LEGACY_NOTIFY_INTR	2
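/* Worked example (illustrative): a vNIC provisioned with 8 RQs and 8 WQs
 * uses 16 CQs -- RQ completions land on CQs 0-7 and WQ completions on
 * CQs 8-15, per enic_cq_rq()/enic_cq_wq() above. In MSI-X mode that
 * typically means 2 + 16 = 18 vectors: 0 for errors, 1 for notify, and
 * 2-17 for I/O (starting at ENIC_MSIX_IO_INTR_BASE). ENIC_MSIX_MIN_INTR
 * is 4: the two reserved vectors plus room for at least one RQ and one WQ
 * I/O vector.
 */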
static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return ENIC_MSIX_ERR_INTR;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return ENIC_MSIX_NOTIFY_INTR;
}

static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_ERR_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_err_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}

static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_NOTIFY_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_notify_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}

static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		enic->gen_stats.dma_map_error++;

		return -ENOMEM;
	}

	return 0;
}

void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);

#endif /* _ENIC_H_ */