/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>

#define DRV_NAME		"enic"
#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"

#define ENIC_BARS_MAX		6

#define ENIC_WQ_MAX		256
#define ENIC_RQ_MAX		256

#define ENIC_WQ_NAPI_BUDGET	256

#define ENIC_AIC_LARGE_PKT_DIFF	3

struct enic_msix_entry {
	int requested;
	char devname[IFNAMSIZ + 8];
	irqreturn_t (*isr)(int, void *);
	void *devid;
	cpumask_var_t affinity_mask;
};

/* Store only the lower range.  The higher range is given by fw. */
struct enic_intr_mod_range {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
};

struct enic_intr_mod_table {
	u32 rx_rate;
	u32 range_percent;
};

#define ENIC_MAX_LINK_SPEEDS		3
#define ENIC_LINK_SPEED_10G		10000
#define ENIC_LINK_SPEED_4G		4000
#define ENIC_LINK_40G_INDEX		2
#define ENIC_LINK_10G_INDEX		1
#define ENIC_LINK_4G_INDEX		0
#define ENIC_RX_COALESCE_RANGE_END	125
#define ENIC_AIC_TS_BREAK		100

struct enic_rx_coal {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
	u32 range_end;
	u32 use_adaptive_rx_coalesce;
};

/* priv_flags */
#define ENIC_SRIOV_ENABLED		(1 << 0)

/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED	(1 << 0)
#define ENIC_SET_REQUEST		(1 << 1)
#define ENIC_SET_NAME			(1 << 2)
#define ENIC_SET_INSTANCE		(1 << 3)
#define ENIC_SET_HOST			(1 << 4)

struct enic_port_profile {
	u32 set;
	u8 request;
	char name[PORT_PROFILE_MAX];
	u8 instance_uuid[PORT_UUID_MAX];
	u8 host_uuid[PORT_UUID_MAX];
	u8 vf_mac[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
};

/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5-tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adapter
 * @rq_id: desired rq index
 * @node: hlist_node
 */
struct enic_rfs_fltr_node {
	struct flow_keys keys;
	u32 flow_id;
	u16 fltr_id;
	u16 rq_id;
	struct hlist_node node;
};

/* enic_rfs_flw_tbl - rfs flow table
 * @max: maximum number of filters the vNIC supports
 * @free: number of free filters available
 * @toclean: hash table index to clean next
 * @ht_head: hash table list head
 * @lock: spin lock
 * @rfs_may_expire: timer function for enic_rps_may_expire_flow
 */
struct enic_rfs_flw_tbl {
	u16 max;
	int free;

#define ENIC_RFS_FLW_BITSHIFT	(10)
#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
	u16 toclean:ENIC_RFS_FLW_BITSHIFT;
	struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
	spinlock_t lock;
	struct timer_list rfs_may_expire;
};
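/* Illustrative sketch (not part of the driver): the table above holds
 * 1 << ENIC_RFS_FLW_BITSHIFT (1024) hash buckets, and a flow's bucket is
 * typically chosen by masking its hash with ENIC_RFS_FLW_MASK, e.g.:
 *
 *	u32 hash = skb_get_hash(skb);
 *	struct hlist_head *head = &tbl->ht_head[hash & ENIC_RFS_FLW_MASK];
 *
 * The real lookup lives in the aRFS code (enic_clsf.c); this only sketches
 * the indexing scheme implied by the defines.
 */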
struct vxlan_offload {
	u16 vxlan_udp_port_number;
	u8 patch_level;
	u8 flags;
};

struct enic_wq_stats {
	u64 packets;		/* pkts queued for Tx */
	u64 stopped;		/* Tx ring almost full, queue stopped */
	u64 wake;		/* Tx ring no longer full, queue woken up */
	u64 tso;		/* non-encap tso pkt */
	u64 encap_tso;		/* encap tso pkt */
	u64 encap_csum;		/* encap HW csum */
	u64 csum_partial;	/* skb->ip_summed = CHECKSUM_PARTIAL */
	u64 csum_none;		/* HW csum not required */
	u64 bytes;		/* bytes queued for Tx */
	u64 add_vlan;		/* HW adds vlan tag */
	u64 cq_work;		/* Tx completions processed */
	u64 cq_bytes;		/* Tx bytes processed */
	u64 null_pkt;		/* skb length <= 0 */
	u64 skb_linear_fail;	/* linearize failures */
	u64 desc_full_awake;	/* Tx ring full while queue awake */
};

struct enic_rq_stats {
	u64 packets;			/* pkts received */
	u64 bytes;			/* bytes received */
	u64 l4_rss_hash;		/* hashed on l4 */
	u64 l3_rss_hash;		/* hashed on l3 */
	u64 csum_unnecessary;		/* HW verified csum */
	u64 csum_unnecessary_encap;	/* HW verified csum on encap packet */
	u64 vlan_stripped;		/* HW stripped vlan */
	u64 napi_complete;		/* napi complete intr reenabled */
	u64 napi_repoll;		/* napi poll again */
	u64 bad_fcs;			/* bad pkts */
	u64 pkt_truncated;		/* truncated pkts */
	u64 no_skb;			/* out of skbs */
	u64 desc_skip;			/* Rx pkt went into later buffer */
};

struct enic_wq {
	spinlock_t lock;		/* spinlock for wq */
	struct vnic_wq vwq;
	struct enic_wq_stats stats;
} ____cacheline_aligned;

struct enic_rq {
	struct vnic_rq vrq;
	struct enic_rq_stats stats;
} ____cacheline_aligned;

/* Per-instance private data structure */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar[ENIC_BARS_MAX];
	struct vnic_dev *vdev;
	struct timer_list notify_timer;
	struct work_struct reset;
	struct work_struct tx_hang_reset;
	struct work_struct change_mtu_work;
	struct msix_entry *msix_entry;
	struct enic_msix_entry *msix;
	u32 msg_enable;
	spinlock_t devcmd_lock;
	u8 mac_addr[ETH_ALEN];
	unsigned int flags;
	unsigned int priv_flags;
	unsigned int mc_count;
	unsigned int uc_count;
	u32 port_mtu;
	struct enic_rx_coal rx_coalesce_setting;
	u32 rx_coalesce_usecs;
	u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
	u16 num_vfs;
#endif
	spinlock_t enic_api_lock;
	bool enic_api_busy;
	struct enic_port_profile *pp;

	struct enic_wq *wq;
	unsigned int wq_avail;
	unsigned int wq_count;
	u16 loop_enable;
	u16 loop_tag;

	struct enic_rq *rq;
	unsigned int rq_avail;
	unsigned int rq_count;
	struct vxlan_offload vxlan;
	struct napi_struct *napi;

	struct vnic_intr *intr;
	unsigned int intr_avail;
	unsigned int intr_count;
	u32 __iomem *legacy_pba;	/* memory-mapped */

	struct vnic_cq *cq;
	unsigned int cq_avail;
	unsigned int cq_count;
	struct enic_rfs_flw_tbl rfs_h;
	u32 rx_copybreak;
	u8 rss_key[ENIC_RSS_LEN];
	struct vnic_gen_stats gen_stats;
};

static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
	struct enic *enic = vdev->priv;

	return enic->netdev;
}

/* wrapper macros for kernel logging */
#define vdev_err(vdev, fmt, ...) \
	dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_warn(vdev, fmt, ...) \
	dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_info(vdev, fmt, ...) \
	dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)

#define vdev_neterr(vdev, fmt, ...) \
	netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netwarn(vdev, fmt, ...) \
	netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netinfo(vdev, fmt, ...) \
	netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)

static inline struct device *enic_get_dev(struct enic *enic)
{
	return &(enic->pdev->dev);
}

static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic,
					     unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic,
					     unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

/* MSIX interrupts are organized as the error interrupt, then the notify
 * interrupt, followed by all the I/O interrupts.  The error interrupt needs
 * to fit in 7 bits due to hardware constraints.
 */
#define ENIC_MSIX_RESERVED_INTR	2
#define ENIC_MSIX_ERR_INTR	0
#define ENIC_MSIX_NOTIFY_INTR	1
#define ENIC_MSIX_IO_INTR_BASE	ENIC_MSIX_RESERVED_INTR
#define ENIC_MSIX_MIN_INTR	(ENIC_MSIX_RESERVED_INTR + 2)

#define ENIC_LEGACY_IO_INTR	0
#define ENIC_LEGACY_ERR_INTR	1
#define ENIC_LEGACY_NOTIFY_INTR	2
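/* Illustrative layout sketch (not part of the driver): per enic_cq_rq() and
 * enic_cq_wq() above, completion queues are indexed with all RQ CQs first,
 * then all WQ CQs.  With, say, rq_count = 4 and wq_count = 4:
 *
 *	CQ index:  0  1  2  3 | 4  5  6  7
 *	           RQ0..RQ3   | WQ0..WQ3	(enic_cq_wq(enic, 2) == 6)
 *
 * In MSI-X mode, per the defines above, vector 0 is the error interrupt,
 * vector 1 the notify interrupt, and I/O vectors start at
 * ENIC_MSIX_IO_INTR_BASE, reached from a CQ through its interrupt_offset.
 * Legacy INTx uses the different fixed assignment given by the
 * ENIC_LEGACY_* defines.
 */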
static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return ENIC_MSIX_ERR_INTR;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return ENIC_MSIX_NOTIFY_INTR;
}

static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_ERR_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_err_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}

static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_NOTIFY_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_notify_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}

static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		enic->gen_stats.dma_map_error++;

		return -ENOMEM;
	}

	return 0;
}

void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);

#endif /* _ENIC_H_ */
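/* Usage sketch for enic_dma_map_check() (illustrative only): callers map a
 * buffer and let the helper do the error accounting, e.g. on a Tx path:
 *
 *	dma_addr_t dma_addr = dma_map_single(&enic->pdev->dev, skb->data,
 *					     len, DMA_TO_DEVICE);
 *	if (enic_dma_map_check(enic, dma_addr))
 *		return -ENOMEM;
 *
 * On failure the helper bumps gen_stats.dma_map_error and rate-limits the
 * warning, so callers only need to unwind their own state.
 */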