if_em.c: diff of 67bc8c8b9e69bc53221a9bd914e418d81d6cdc7d (old) against f2d6ace4a684fcb98293983758c73d703338a78b (new)
1/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35#include "opt_em.h"
36#include "opt_ddb.h"
37#include "opt_inet.h"
38#include "opt_inet6.h"
39
40#ifdef HAVE_KERNEL_OPTION_HEADERS
41#include "opt_device_polling.h"
42#endif
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#ifdef DDB
47#include <sys/types.h>
48#include <ddb/ddb.h>
49#endif
50#if __FreeBSD_version >= 800000
51#include <sys/buf_ring.h>
52#endif
53#include <sys/bus.h>
54#include <sys/endian.h>
55#include <sys/kernel.h>
56#include <sys/kthread.h>
57#include <sys/malloc.h>
58#include <sys/mbuf.h>
59#include <sys/module.h>
60#include <sys/rman.h>
61#include <sys/smp.h>
62#include <sys/socket.h>
63#include <sys/sockio.h>
64#include <sys/sysctl.h>
65#include <sys/taskqueue.h>
66#include <sys/eventhandler.h>
67#include <machine/bus.h>
68#include <machine/resource.h>
69
70#include <net/bpf.h>
71#include <net/ethernet.h>
72#include <net/if.h>
73#include <net/if_var.h>
74#include <net/if_arp.h>
75#include <net/if_dl.h>
76#include <net/if_media.h>
77
78#include <net/if_types.h>
79#include <net/if_vlan_var.h>
80
81#include <netinet/in_systm.h>
82#include <netinet/in.h>
83#include <netinet/if_ether.h>
84#include <netinet/ip.h>
85#include <netinet/ip6.h>
86#include <netinet/tcp.h>
87#include <netinet/udp.h>
88
89#include <machine/in_cksum.h>
90#include <dev/led/led.h>
91#include <dev/pci/pcivar.h>
92#include <dev/pci/pcireg.h>
93
94#include "e1000_api.h"
95#include "e1000_82571.h"
1/* $FreeBSD$ */
96#include "if_em.h"
2#include "if_em.h"
3#include <sys/sbuf.h>
4#include <machine/_inttypes.h>
97
5
6#define em_mac_min e1000_82547
7#define igb_mac_min e1000_82575
8
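/*
 * These two cutoffs split the supported MACs into three generations:
 * parts below em_mac_min use the legacy "lem" descriptor layout,
 * parts in [em_mac_min, igb_mac_min) use the extended "em" layout,
 * and parts at or above igb_mac_min use the advanced "igb" layout.
 * A sketch of the selection performed later in em_if_attach_pre():
 *
 *	if (hw->mac.type >= igb_mac_min)
 *		scctx->isc_txrx = &igb_txrx;
 *	else if (hw->mac.type >= em_mac_min)
 *		scctx->isc_txrx = &em_txrx;
 *	else
 *		scctx->isc_txrx = &lem_txrx;
 */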
98/*********************************************************************
99 * Driver version:
100 *********************************************************************/
101char em_driver_version[] = "7.6.1-k";
102
103/*********************************************************************
104 * PCI Device ID Table
105 *
106 * Used by probe to select devices to load on
107 * Last field stores an index into e1000_strings
108 * Last entry must be all 0s
109 *
110 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
111 *********************************************************************/
112
9/*********************************************************************
10 * Driver version:
11 *********************************************************************/
12char em_driver_version[] = "7.6.1-k";
13
14/*********************************************************************
15 * PCI Device ID Table
16 *
17 * Used by probe to select devices to load on
 18 * Each PVID entry carries its branding string inline
 19 * Last entry must be PVID_END
 20 *
 21 * PVID(Vendor ID, Device ID, Branding String)
22 *********************************************************************/
23
113static em_vendor_info_t em_vendor_info_array[] =
24static pci_vendor_info_t em_vendor_info_array[] =
114{
25{
115 /* Intel(R) PRO/1000 Network Connection */
116 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
120 PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
122 PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
124 PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
126 PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
128 PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
130 PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
 26 /* Intel(R) PRO/1000 Network Connection - Legacy em */
27 PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) PRO/1000 Network Connection"),
28 PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) PRO/1000 Network Connection"),
29 PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) PRO/1000 Network Connection"),
30 PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) PRO/1000 Network Connection"),
31 PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) PRO/1000 Network Connection"),
135
32
136 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
141 PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
143 PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
145 PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
147 PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
161 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
162 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
163 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
164 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
165 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
166 { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
167 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
168 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
170 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_ICH10_D_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
173 { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
175 { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
177 { 0x8086, E1000_DEV_ID_PCH2_LV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
178 { 0x8086, E1000_DEV_ID_PCH2_LV_V, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_PCH_LPT_I217_V, PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM,
182 PCI_ANY_ID, PCI_ANY_ID, 0},
183 { 0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V,
184 PCI_ANY_ID, PCI_ANY_ID, 0},
185 { 0x8086, E1000_DEV_ID_PCH_I218_LM2, PCI_ANY_ID, PCI_ANY_ID, 0},
186 { 0x8086, E1000_DEV_ID_PCH_I218_V2, PCI_ANY_ID, PCI_ANY_ID, 0},
187 { 0x8086, E1000_DEV_ID_PCH_I218_LM3, PCI_ANY_ID, PCI_ANY_ID, 0},
188 { 0x8086, E1000_DEV_ID_PCH_I218_V3, PCI_ANY_ID, PCI_ANY_ID, 0},
189 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
190 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2,
192 PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3,
195 PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4,
197 PCI_ANY_ID, PCI_ANY_ID, 0},
198 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5,
200 PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, PCI_ANY_ID, PCI_ANY_ID, 0},
33 PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) PRO/1000 Network Connection"),
34 PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) PRO/1000 Network Connection"),
35 PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) PRO/1000 Network Connection"),
36 PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
37 PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) PRO/1000 Network Connection"),
38 PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) PRO/1000 Network Connection"),
39 PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
40
41 PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) PRO/1000 Network Connection"),
42
43 PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) PRO/1000 Network Connection"),
44 PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
45
46 PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
47 PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
48 PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
49 PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) PRO/1000 Network Connection"),
50
51 PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) PRO/1000 Network Connection"),
52 PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) PRO/1000 Network Connection"),
53 PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) PRO/1000 Network Connection"),
54 PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) PRO/1000 Network Connection"),
55 PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) PRO/1000 Network Connection"),
56
57 PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
58 PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
59 PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
60 PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) PRO/1000 Network Connection"),
61 PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) PRO/1000 Network Connection"),
62 PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) PRO/1000 Network Connection"),
63 PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) PRO/1000 Network Connection"),
64 PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
65 PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) PRO/1000 Network Connection"),
66
67 PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) PRO/1000 Network Connection"),
68 PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
69 PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) PRO/1000 Network Connection"),
70
71 /* Intel(R) PRO/1000 Network Connection - em */
72 PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
73 PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
74 PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 Network Connection"),
75 PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 Network Connection"),
76 PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 Network Connection"),
77 PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
78 PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 Network Connection"),
79 PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 Network Connection"),
80 PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
81 PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
82 PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
83 PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 Network Connection"),
84 PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 Network Connection"),
85 PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 Network Connection"),
86 PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 Network Connection"),
87 PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) PRO/1000 Network Connection"),
88 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) PRO/1000 Network Connection"),
89 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) PRO/1000 Network Connection"),
90 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) PRO/1000 Network Connection"),
91 PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) PRO/1000 Network Connection"),
92 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
93 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
94 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) PRO/1000 Network Connection"),
95 PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) PRO/1000 Network Connection"),
96 PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
97 PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) PRO/1000 Network Connection"),
98 PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) PRO/1000 Network Connection"),
99 PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) PRO/1000 Network Connection"),
100 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"),
101 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) PRO/1000 Network Connection"),
102 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) PRO/1000 Network Connection"),
103 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) PRO/1000 Network Connection"),
104 PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) PRO/1000 Network Connection"),
105 PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) PRO/1000 Network Connection"),
106 PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) PRO/1000 Network Connection"),
107 PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) PRO/1000 Network Connection"),
108 PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) PRO/1000 Network Connection"),
109 PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 Network Connection"),
110 PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) PRO/1000 Network Connection"),
111 PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) PRO/1000 Network Connection"),
112 PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) PRO/1000 Network Connection"),
113 PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) PRO/1000 Network Connection"),
114 PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) PRO/1000 Network Connection"),
115 PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) PRO/1000 Network Connection"),
116 PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) PRO/1000 Network Connection"),
117 PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) PRO/1000 Network Connection"),
118 PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) PRO/1000 Network Connection"),
119 PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) PRO/1000 Network Connection"),
120 PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) PRO/1000 Network Connection"),
121 PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) PRO/1000 Network Connection"),
122 PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) PRO/1000 Network Connection"),
123 PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) PRO/1000 Network Connection"),
124 PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) PRO/1000 Network Connection"),
125 PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) PRO/1000 Network Connection"),
126 PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) PRO/1000 Network Connection"),
127 PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) PRO/1000 Network Connection"),
128 PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) PRO/1000 Network Connection"),
129 PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) PRO/1000 Network Connection"),
130 PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) PRO/1000 Network Connection"),
131 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) PRO/1000 Network Connection"),
132 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) PRO/1000 Network Connection"),
133 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) PRO/1000 Network Connection"),
134 PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) PRO/1000 Network Connection"),
135 PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) PRO/1000 Network Connection"),
202 /* required last entry */
136 /* required last entry */
203 { 0, 0, 0, 0, 0}
137 PVID_END
204};
205
138};
139
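/*
 * With iflib, each PVID() entry pairs the device ID with its branding
 * string, so probe no longer needs the separate string-index lookup
 * used by the legacy em_vendor_info_t table above; PVID_END marks the
 * required terminator in place of the all-zeros entry.
 */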
206/*********************************************************************
207 * Table of branding strings for all supported NICs.
208 *********************************************************************/
209
210static char *em_strings[] = {
211 "Intel(R) PRO/1000 Network Connection"
140static pci_vendor_info_t igb_vendor_info_array[] =
141{
 142 /* Intel(R) PRO/1000 Network Connection - igb */
143 PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
144 PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
145 PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
146 PVID(0x8086, E1000_DEV_ID_82576, "Intel(R) PRO/1000 PCI-Express Network Driver"),
147 PVID(0x8086, E1000_DEV_ID_82576_NS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
148 PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
149 PVID(0x8086, E1000_DEV_ID_82576_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
150 PVID(0x8086, E1000_DEV_ID_82576_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
151 PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD, "Intel(R) PRO/1000 PCI-Express Network Driver"),
152 PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
153 PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, "Intel(R) PRO/1000 PCI-Express Network Driver"),
154 PVID(0x8086, E1000_DEV_ID_82576_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"),
155 PVID(0x8086, E1000_DEV_ID_82580_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
156 PVID(0x8086, E1000_DEV_ID_82580_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
157 PVID(0x8086, E1000_DEV_ID_82580_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
158 PVID(0x8086, E1000_DEV_ID_82580_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
159 PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL, "Intel(R) PRO/1000 PCI-Express Network Driver"),
160 PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
161 PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
162 PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
163 PVID(0x8086, E1000_DEV_ID_DH89XXCC_SFP, "Intel(R) PRO/1000 PCI-Express Network Driver"),
164 PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, "Intel(R) PRO/1000 PCI-Express Network Driver"),
165 PVID(0x8086, E1000_DEV_ID_I350_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
166 PVID(0x8086, E1000_DEV_ID_I350_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
167 PVID(0x8086, E1000_DEV_ID_I350_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
168 PVID(0x8086, E1000_DEV_ID_I350_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
169 PVID(0x8086, E1000_DEV_ID_I350_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"),
170 PVID(0x8086, E1000_DEV_ID_I210_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
171 PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT, "Intel(R) PRO/1000 PCI-Express Network Driver"),
172 PVID(0x8086, E1000_DEV_ID_I210_COPPER_OEM1, "Intel(R) PRO/1000 PCI-Express Network Driver"),
173 PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
174 PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
175 PVID(0x8086, E1000_DEV_ID_I210_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
176 PVID(0x8086, E1000_DEV_ID_I210_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"),
177 PVID(0x8086, E1000_DEV_ID_I210_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
178 PVID(0x8086, E1000_DEV_ID_I211_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"),
179 PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
180 PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"),
181 PVID(0x8086, E1000_DEV_ID_I354_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"),
182 /* required last entry */
183 PVID_END
212};
213
214/*********************************************************************
215 * Function prototypes
216 *********************************************************************/
184};
185
186/*********************************************************************
187 * Function prototypes
188 *********************************************************************/
217static int em_probe(device_t);
218static int em_attach(device_t);
219static int em_detach(device_t);
220static int em_shutdown(device_t);
221static int em_suspend(device_t);
222static int em_resume(device_t);
223#ifdef EM_MULTIQUEUE
224static int em_mq_start(if_t, struct mbuf *);
225static int em_mq_start_locked(if_t,
226 struct tx_ring *);
227static void em_qflush(if_t);
228#else
229static void em_start(if_t);
230static void em_start_locked(if_t, struct tx_ring *);
231#endif
232static int em_ioctl(if_t, u_long, caddr_t);
233static uint64_t em_get_counter(if_t, ift_counter);
234static void em_init(void *);
235static void em_init_locked(struct adapter *);
236static void em_stop(void *);
237static void em_media_status(if_t, struct ifmediareq *);
238static int em_media_change(if_t);
239static void em_identify_hardware(struct adapter *);
240static int em_allocate_pci_resources(struct adapter *);
241static int em_allocate_legacy(struct adapter *);
242static int em_allocate_msix(struct adapter *);
243static int em_allocate_queues(struct adapter *);
244static int em_setup_msix(struct adapter *);
245static void em_free_pci_resources(struct adapter *);
246static void em_local_timer(void *);
247static void em_reset(struct adapter *);
248static int em_setup_interface(device_t, struct adapter *);
249static void em_flush_desc_rings(struct adapter *);
189static void *em_register(device_t dev);
190static void *igb_register(device_t dev);
191static int em_if_attach_pre(if_ctx_t ctx);
192static int em_if_attach_post(if_ctx_t ctx);
193static int em_if_detach(if_ctx_t ctx);
194static int em_if_shutdown(if_ctx_t ctx);
195static int em_if_suspend(if_ctx_t ctx);
196static int em_if_resume(if_ctx_t ctx);
250
197
251static void em_setup_transmit_structures(struct adapter *);
252static void em_initialize_transmit_unit(struct adapter *);
253static int em_allocate_transmit_buffers(struct tx_ring *);
254static void em_free_transmit_structures(struct adapter *);
255static void em_free_transmit_buffers(struct tx_ring *);
198static int em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
199static int em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
200static void em_if_queues_free(if_ctx_t ctx);
256
201
257static int em_setup_receive_structures(struct adapter *);
258static int em_allocate_receive_buffers(struct rx_ring *);
259static void em_initialize_receive_unit(struct adapter *);
260static void em_free_receive_structures(struct adapter *);
261static void em_free_receive_buffers(struct rx_ring *);
202static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
203static void em_if_init(if_ctx_t ctx);
204static void em_if_stop(if_ctx_t ctx);
205static void em_if_media_status(if_ctx_t, struct ifmediareq *);
206static int em_if_media_change(if_ctx_t ctx);
207static int em_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
208static void em_if_timer(if_ctx_t ctx, uint16_t qid);
209static void em_if_vlan_register(if_ctx_t ctx, u16 vtag);
210static void em_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
262
211
263static void em_enable_intr(struct adapter *);
264static void em_disable_intr(struct adapter *);
212static void em_identify_hardware(if_ctx_t ctx);
213static int em_allocate_pci_resources(if_ctx_t ctx);
214static void em_free_pci_resources(if_ctx_t ctx);
215static void em_reset(if_ctx_t ctx);
216static int em_setup_interface(if_ctx_t ctx);
217static int em_setup_msix(if_ctx_t ctx);
218
219static void em_initialize_transmit_unit(if_ctx_t ctx);
220static void em_initialize_receive_unit(if_ctx_t ctx);
221
222static void em_if_enable_intr(if_ctx_t ctx);
223static void em_if_disable_intr(if_ctx_t ctx);
224static int em_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
225static void em_if_multi_set(if_ctx_t ctx);
226static void em_if_update_admin_status(if_ctx_t ctx);
265static void em_update_stats_counters(struct adapter *);
266static void em_add_hw_stats(struct adapter *adapter);
227static void em_update_stats_counters(struct adapter *);
228static void em_add_hw_stats(struct adapter *adapter);
267static void em_txeof(struct tx_ring *);
268static bool em_rxeof(struct rx_ring *, int, int *);
269#ifndef __NO_STRICT_ALIGNMENT
270static int em_fixup_rx(struct rx_ring *);
271#endif
272static void em_setup_rxdesc(union e1000_rx_desc_extended *,
273 const struct em_rxbuffer *rxbuf);
274static void em_receive_checksum(uint32_t status, struct mbuf *);
275static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int,
276 struct ip *, u32 *, u32 *);
277static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *,
278 struct tcphdr *, u32 *, u32 *);
279static void em_set_promisc(struct adapter *);
280static void em_disable_promisc(struct adapter *);
281static void em_set_multi(struct adapter *);
282static void em_update_link_status(struct adapter *);
283static void em_refresh_mbufs(struct rx_ring *, int);
284static void em_register_vlan(void *, if_t, u16);
285static void em_unregister_vlan(void *, if_t, u16);
229static int em_if_set_promisc(if_ctx_t ctx, int flags);
286static void em_setup_vlan_hw_support(struct adapter *);
230static void em_setup_vlan_hw_support(struct adapter *);
287static int em_xmit(struct tx_ring *, struct mbuf **);
288static int em_dma_malloc(struct adapter *, bus_size_t,
289 struct em_dma_alloc *, int);
290static void em_dma_free(struct adapter *, struct em_dma_alloc *);
291static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
292static void em_print_nvm_info(struct adapter *);
293static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
294static void em_print_debug_info(struct adapter *);
295static int em_is_valid_ether_addr(u8 *);
296static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
297static void em_add_int_delay_sysctl(struct adapter *, const char *,
298 const char *, struct em_int_delay_info *, int, int);
299/* Management and WOL Support */
300static void em_init_manageability(struct adapter *);
301static void em_release_manageability(struct adapter *);
302static void em_get_hw_control(struct adapter *);
303static void em_release_hw_control(struct adapter *);
231static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
232static void em_print_nvm_info(struct adapter *);
233static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
234static void em_print_debug_info(struct adapter *);
235static int em_is_valid_ether_addr(u8 *);
236static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
237static void em_add_int_delay_sysctl(struct adapter *, const char *,
238 const char *, struct em_int_delay_info *, int, int);
239/* Management and WOL Support */
240static void em_init_manageability(struct adapter *);
241static void em_release_manageability(struct adapter *);
242static void em_get_hw_control(struct adapter *);
243static void em_release_hw_control(struct adapter *);
304static void em_get_wakeup(device_t);
305static void em_enable_wakeup(device_t);
244static void em_get_wakeup(if_ctx_t ctx);
245static void em_enable_wakeup(if_ctx_t ctx);
306static int em_enable_phy_wakeup(struct adapter *);
246static int em_enable_phy_wakeup(struct adapter *);
307static void em_led_func(void *, int);
308static void em_disable_aspm(struct adapter *);
309
247static void em_disable_aspm(struct adapter *);
248
310static int em_irq_fast(void *);
249int em_intr(void *arg);
250static void em_disable_promisc(if_ctx_t ctx);
311
312/* MSIX handlers */
251
252/* MSIX handlers */
313static void em_msix_tx(void *);
314static void em_msix_rx(void *);
315static void em_msix_link(void *);
316static void em_handle_tx(void *context, int pending);
317static void em_handle_rx(void *context, int pending);
318static void em_handle_link(void *context, int pending);
253static int em_if_msix_intr_assign(if_ctx_t, int);
254static int em_msix_link(void *);
255static void em_handle_link(void *context);
319
256
320#ifdef EM_MULTIQUEUE
321static void em_enable_vectors_82574(struct adapter *);
322#endif
257static void em_enable_vectors_82574(if_ctx_t);
323
324static void em_set_sysctl_value(struct adapter *, const char *,
325 const char *, int *, int);
326static int em_set_flowcntl(SYSCTL_HANDLER_ARGS);
327static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
258
259static void em_set_sysctl_value(struct adapter *, const char *,
260 const char *, int *, int);
261static int em_set_flowcntl(SYSCTL_HANDLER_ARGS);
262static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
263static void em_if_led_func(if_ctx_t ctx, int onoff);
328
264
329static __inline void em_rx_discard(struct rx_ring *, int);
265static void em_init_tx_ring(struct em_tx_queue *que);
266static int em_get_regs(SYSCTL_HANDLER_ARGS);
330
267
331#ifdef DEVICE_POLLING
332static poll_handler_t em_poll;
333#endif /* POLLING */
268static void lem_smartspeed(struct adapter *adapter);
269static void igb_configure_queues(struct adapter *adapter);
334
270
271
335/*********************************************************************
336 * FreeBSD Device Interface Entry Points
337 *********************************************************************/
272/*********************************************************************
273 * FreeBSD Device Interface Entry Points
274 *********************************************************************/
338
339static device_method_t em_methods[] = {
340 /* Device interface */
275static device_method_t em_methods[] = {
276 /* Device interface */
341 DEVMETHOD(device_probe, em_probe),
342 DEVMETHOD(device_attach, em_attach),
343 DEVMETHOD(device_detach, em_detach),
344 DEVMETHOD(device_shutdown, em_shutdown),
345 DEVMETHOD(device_suspend, em_suspend),
346 DEVMETHOD(device_resume, em_resume),
347 DEVMETHOD_END
277 DEVMETHOD(device_register, em_register),
278 DEVMETHOD(device_probe, iflib_device_probe),
279 DEVMETHOD(device_attach, iflib_device_attach),
280 DEVMETHOD(device_detach, iflib_device_detach),
281 DEVMETHOD(device_shutdown, iflib_device_shutdown),
282 DEVMETHOD(device_suspend, iflib_device_suspend),
283 DEVMETHOD(device_resume, iflib_device_resume),
284 DEVMETHOD_END
348};
349
285};
286
287static device_method_t igb_methods[] = {
288 /* Device interface */
289 DEVMETHOD(device_register, igb_register),
290 DEVMETHOD(device_probe, iflib_device_probe),
291 DEVMETHOD(device_attach, iflib_device_attach),
292 DEVMETHOD(device_detach, iflib_device_detach),
293 DEVMETHOD(device_shutdown, iflib_device_shutdown),
294 DEVMETHOD(device_suspend, iflib_device_suspend),
295 DEVMETHOD(device_resume, iflib_device_resume),
296 DEVMETHOD_END
297};
298
299
350static driver_t em_driver = {
351 "em", em_methods, sizeof(struct adapter),
352};
353
300static driver_t em_driver = {
301 "em", em_methods, sizeof(struct adapter),
302};
303
354devclass_t em_devclass;
304static devclass_t em_devclass;
355DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
305DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
306
356MODULE_DEPEND(em, pci, 1, 1, 1);
357MODULE_DEPEND(em, ether, 1, 1, 1);
307MODULE_DEPEND(em, pci, 1, 1, 1);
308MODULE_DEPEND(em, ether, 1, 1, 1);
358#ifdef DEV_NETMAP
359MODULE_DEPEND(em, netmap, 1, 1, 1);
360#endif /* DEV_NETMAP */
309MODULE_DEPEND(em, iflib, 1, 1, 1);
361
310
311static driver_t igb_driver = {
312 "igb", igb_methods, sizeof(struct adapter),
313};
314
315static devclass_t igb_devclass;
316DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
317
318MODULE_DEPEND(igb, pci, 1, 1, 1);
319MODULE_DEPEND(igb, ether, 1, 1, 1);
320MODULE_DEPEND(igb, iflib, 1, 1, 1);
321
322
323static device_method_t em_if_methods[] = {
324 DEVMETHOD(ifdi_attach_pre, em_if_attach_pre),
325 DEVMETHOD(ifdi_attach_post, em_if_attach_post),
326 DEVMETHOD(ifdi_detach, em_if_detach),
327 DEVMETHOD(ifdi_shutdown, em_if_shutdown),
328 DEVMETHOD(ifdi_suspend, em_if_suspend),
329 DEVMETHOD(ifdi_resume, em_if_resume),
330 DEVMETHOD(ifdi_init, em_if_init),
331 DEVMETHOD(ifdi_stop, em_if_stop),
332 DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign),
333 DEVMETHOD(ifdi_intr_enable, em_if_enable_intr),
334 DEVMETHOD(ifdi_intr_disable, em_if_disable_intr),
335 DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc),
336 DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc),
337 DEVMETHOD(ifdi_queues_free, em_if_queues_free),
338 DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status),
339 DEVMETHOD(ifdi_multi_set, em_if_multi_set),
340 DEVMETHOD(ifdi_media_status, em_if_media_status),
341 DEVMETHOD(ifdi_media_change, em_if_media_change),
342 DEVMETHOD(ifdi_mtu_set, em_if_mtu_set),
343 DEVMETHOD(ifdi_promisc_set, em_if_set_promisc),
344 DEVMETHOD(ifdi_timer, em_if_timer),
345 DEVMETHOD(ifdi_vlan_register, em_if_vlan_register),
346 DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister),
347 DEVMETHOD(ifdi_get_counter, em_if_get_counter),
348 DEVMETHOD(ifdi_led_func, em_if_led_func),
349 DEVMETHOD(ifdi_queue_intr_enable, em_if_queue_intr_enable),
350 DEVMETHOD_END
351};
352
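/*
 * With iflib, the driver no longer implements the ifnet entry points
 * itself: the kernel device methods above forward to iflib, and iflib
 * calls back into the driver through the ifdi_* methods in this table.
 */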
 353 /*
 354  * Note: where the legacy driver tested (adapter->msix_mem), this
 355  * code tests (adapter->intr_type == IFLIB_INTR_MSIX) instead.
 356  */
357static driver_t em_if_driver = {
358 "em_if", em_if_methods, sizeof(struct adapter)
359};
360
362/*********************************************************************
363 * Tunable default values.
364 *********************************************************************/
365
366#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
367#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
361/*********************************************************************
362 * Tunable default values.
363 *********************************************************************/
364
365#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
366#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
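/*
 * The interrupt-delay registers count in ~1.024 us ticks, hence the
 * 1024/1000 scaling with rounding in these macros.  A worked example
 * (illustration only):
 *
 *	EM_USECS_TO_TICKS(100) = (1000*100 + 512) / 1024 = 98
 *	EM_TICKS_TO_USECS(98)  = (1024*98 + 500) / 1000  = 100
 */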
367#define M_TSO_LEN 66
368
369#define MAX_INTS_PER_SEC 8000
370#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
371
368
369#define MAX_INTS_PER_SEC 8000
370#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
371
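/*
 * The ITR register is programmed in 256 ns units, so (illustrative
 * arithmetic only):
 *
 *	DEFAULT_ITR = 1000000000 / (8000 * 256) = 488
 *
 * i.e. about 488 * 256 ns ~= 125 us between interrupts, which is
 * the 8000 interrupts per second MAX_INTS_PER_SEC asks for.
 */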
372/* Allow common code without TSO */
373#ifndef CSUM_TSO
374#define CSUM_TSO 0
375#endif
376
372#define TSO_WORKAROUND 4
373
374static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
375
376static int em_disable_crc_stripping = 0;
377SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
378 &em_disable_crc_stripping, 0, "Disable CRC Stripping");
379

--- 8 unchanged lines hidden ---

388static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
389SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
390 &em_tx_abs_int_delay_dflt, 0,
391 "Default transmit interrupt delay limit in usecs");
392SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
393 &em_rx_abs_int_delay_dflt, 0,
394 "Default receive interrupt delay limit in usecs");
395
377#define TSO_WORKAROUND 4
378
379static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters");
380
381static int em_disable_crc_stripping = 0;
382SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
383 &em_disable_crc_stripping, 0, "Disable CRC Stripping");
384

--- 8 unchanged lines hidden ---

393static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
394SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
395 &em_tx_abs_int_delay_dflt, 0,
396 "Default transmit interrupt delay limit in usecs");
397SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
398 &em_rx_abs_int_delay_dflt, 0,
399 "Default receive interrupt delay limit in usecs");
400
396static int em_rxd = EM_DEFAULT_RXD;
397static int em_txd = EM_DEFAULT_TXD;
398SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0,
399 "Number of receive descriptors per queue");
400SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0,
401 "Number of transmit descriptors per queue");
402
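/*
 * CTLFLAG_RDTUN knobs like these are read-only sysctls that can be
 * seeded as boot-time tunables, e.g. in loader.conf (values are
 * examples only):  hw.em.rxd="2048"  hw.em.txd="2048"
 */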
403static int em_smart_pwr_down = FALSE;
404SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
405 0, "Set to true to leave smart power down enabled on newer adapters");
406
407/* Controls whether promiscuous also shows bad packets */
401static int em_smart_pwr_down = FALSE;
402SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
403 0, "Set to true to leave smart power down enabled on newer adapters");
404
405/* Controls whether promiscuous also shows bad packets */
408static int em_debug_sbp = FALSE;
406static int em_debug_sbp = TRUE;
409SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
410 "Show bad packets in promiscuous mode");
411
407SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0,
408 "Show bad packets in promiscuous mode");
409
412static int em_enable_msix = TRUE;
413SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0,
414 "Enable MSI-X interrupts");
415
416#ifdef EM_MULTIQUEUE
417static int em_num_queues = 1;
418SYSCTL_INT(_hw_em, OID_AUTO, num_queues, CTLFLAG_RDTUN, &em_num_queues, 0,
419 "82574 only: Number of queues to configure, 0 indicates autoconfigure");
420#endif
421
422/*
423** Global variable to store last used CPU when binding queues
424** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a
425** queue is bound to a cpu.
426*/
427static int em_last_bind_cpu = -1;
428
429/* How many packets rxeof tries to clean at a time */
430static int em_rx_process_limit = 100;
431SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
432 &em_rx_process_limit, 0,
433 "Maximum number of received packets to process "
434 "at a time, -1 means unlimited");
435
 436 /* Energy Efficient Ethernet - default to OFF (the tunable stores the disable flag, so 1 = EEE off) */
437static int eee_setting = 1;
438SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
439 "Enable Energy Efficient Ethernet");
440
410/* How many packets rxeof tries to clean at a time */
411static int em_rx_process_limit = 100;
412SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
413 &em_rx_process_limit, 0,
414 "Maximum number of received packets to process "
415 "at a time, -1 means unlimited");
416
 417 /* Energy Efficient Ethernet - default to OFF (the tunable stores the disable flag, so 1 = EEE off) */
418static int eee_setting = 1;
419SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
420 "Enable Energy Efficient Ethernet");
421
422/*
 423 ** Tunable interrupt rate
424*/
425static int em_max_interrupt_rate = 8000;
426SYSCTL_INT(_hw_em, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
427 &em_max_interrupt_rate, 0, "Maximum interrupts per second");
428
429
430
441/* Global used in WOL setup with multiport cards */
442static int global_quad_port_a = 0;
443
431/* Global used in WOL setup with multiport cards */
432static int global_quad_port_a = 0;
433
444#ifdef DEV_NETMAP /* see ixgbe.c for details */
445#include <dev/netmap/if_em_netmap.h>
446#endif /* DEV_NETMAP */
434extern struct if_txrx igb_txrx;
435extern struct if_txrx em_txrx;
436extern struct if_txrx lem_txrx;
447
437
448/*********************************************************************
449 * Device identification routine
438static struct if_shared_ctx em_sctx_init = {
439 .isc_magic = IFLIB_MAGIC,
440 .isc_q_align = PAGE_SIZE,
441 .isc_tx_maxsize = EM_TSO_SIZE,
442 .isc_tx_maxsegsize = PAGE_SIZE,
443 .isc_rx_maxsize = MJUM9BYTES,
444 .isc_rx_nsegments = 1,
445 .isc_rx_maxsegsize = MJUM9BYTES,
446 .isc_nfl = 1,
447 .isc_nrxqs = 1,
448 .isc_ntxqs = 1,
449 .isc_admin_intrcnt = 1,
450 .isc_vendor_info = em_vendor_info_array,
451 .isc_driver_version = em_driver_version,
452 .isc_driver = &em_if_driver,
453 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP,
454
455 .isc_nrxd_min = {EM_MIN_RXD},
456 .isc_ntxd_min = {EM_MIN_TXD},
457 .isc_nrxd_max = {EM_MAX_RXD},
458 .isc_ntxd_max = {EM_MAX_TXD},
459 .isc_nrxd_default = {EM_DEFAULT_RXD},
460 .isc_ntxd_default = {EM_DEFAULT_TXD},
461};
462
463if_shared_ctx_t em_sctx = &em_sctx_init;
464
465
466static struct if_shared_ctx igb_sctx_init = {
467 .isc_magic = IFLIB_MAGIC,
468 .isc_q_align = PAGE_SIZE,
469 .isc_tx_maxsize = EM_TSO_SIZE,
470 .isc_tx_maxsegsize = PAGE_SIZE,
471 .isc_rx_maxsize = MJUM9BYTES,
472 .isc_rx_nsegments = 1,
473 .isc_rx_maxsegsize = MJUM9BYTES,
474 .isc_nfl = 1,
475 .isc_nrxqs = 1,
476 .isc_ntxqs = 1,
477 .isc_admin_intrcnt = 1,
478 .isc_vendor_info = igb_vendor_info_array,
479 .isc_driver_version = em_driver_version,
480 .isc_driver = &em_if_driver,
481 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP,
482
483 .isc_nrxd_min = {EM_MIN_RXD},
484 .isc_ntxd_min = {EM_MIN_TXD},
485 .isc_nrxd_max = {EM_MAX_RXD},
486 .isc_ntxd_max = {EM_MAX_TXD},
487 .isc_nrxd_default = {EM_DEFAULT_RXD},
488 .isc_ntxd_default = {EM_DEFAULT_TXD},
489};
490
491if_shared_ctx_t igb_sctx = &igb_sctx_init;
492
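/*
 * Everything iflib must know before attach lives in these templates:
 * DMA alignment and segment sizing, descriptor count limits, the
 * number of queue types, and the vendor table used for probing.  A
 * driver only has to hand the template back from device_register(),
 * as em_register() and igb_register() do below.
 */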
493/*****************************************************************
450 *
494 *
451 * em_probe determines if the driver should be loaded on
452 * adapter based on PCI vendor/device id of the adapter.
495 * Dump Registers
453 *
496 *
454 * return BUS_PROBE_DEFAULT on success, positive on failure
455 *********************************************************************/
497 ****************************************************************/
498#define IGB_REGS_LEN 739
456
499
457static int
458em_probe(device_t dev)
500static int em_get_regs(SYSCTL_HANDLER_ARGS)
459{
501{
460 char adapter_name[60];
461 uint16_t pci_vendor_id = 0;
462 uint16_t pci_device_id = 0;
463 uint16_t pci_subvendor_id = 0;
464 uint16_t pci_subdevice_id = 0;
465 em_vendor_info_t *ent;
502 struct adapter *adapter = (struct adapter *)arg1;
503 struct e1000_hw *hw = &adapter->hw;
466
504
467 INIT_DEBUGOUT("em_probe: begin");
505 struct sbuf *sb;
 506 	u32 *regs_buff;
 507 	int rc;
468
508
469 pci_vendor_id = pci_get_vendor(dev);
470 if (pci_vendor_id != EM_VENDOR_ID)
471 return (ENXIO);
472
510
473 pci_device_id = pci_get_device(dev);
474 pci_subvendor_id = pci_get_subvendor(dev);
475 pci_subdevice_id = pci_get_subdevice(dev);
511 rc = sysctl_wire_old_buffer(req, 0);
512 MPASS(rc == 0);
513 if (rc != 0)
514 return (rc);
476
515
477 ent = em_vendor_info_array;
478 while (ent->vendor_id != 0) {
479 if ((pci_vendor_id == ent->vendor_id) &&
480 (pci_device_id == ent->device_id) &&
516 sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
517 MPASS(sb != NULL);
518 if (sb == NULL)
519 return (ENOMEM);
481
520
482 ((pci_subvendor_id == ent->subvendor_id) ||
483 (ent->subvendor_id == PCI_ANY_ID)) &&
 	/* Allocate after the early returns above so no error path leaks */
 	regs_buff = malloc(IGB_REGS_LEN * sizeof(u32), M_DEVBUF, M_WAITOK | M_ZERO);

 521 	/* General Registers */
522 regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
523 regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
524 regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
525 regs_buff[3] = E1000_READ_REG(hw, E1000_ICR);
526 regs_buff[4] = E1000_READ_REG(hw, E1000_RCTL);
527 regs_buff[5] = E1000_READ_REG(hw, E1000_RDLEN(0));
528 regs_buff[6] = E1000_READ_REG(hw, E1000_RDH(0));
529 regs_buff[7] = E1000_READ_REG(hw, E1000_RDT(0));
530 regs_buff[8] = E1000_READ_REG(hw, E1000_RXDCTL(0));
531 regs_buff[9] = E1000_READ_REG(hw, E1000_RDBAL(0));
532 regs_buff[10] = E1000_READ_REG(hw, E1000_RDBAH(0));
533 regs_buff[11] = E1000_READ_REG(hw, E1000_TCTL);
534 regs_buff[12] = E1000_READ_REG(hw, E1000_TDBAL(0));
535 regs_buff[13] = E1000_READ_REG(hw, E1000_TDBAH(0));
536 regs_buff[14] = E1000_READ_REG(hw, E1000_TDLEN(0));
537 regs_buff[15] = E1000_READ_REG(hw, E1000_TDH(0));
538 regs_buff[16] = E1000_READ_REG(hw, E1000_TDT(0));
539 regs_buff[17] = E1000_READ_REG(hw, E1000_TXDCTL(0));
540 regs_buff[18] = E1000_READ_REG(hw, E1000_TDFH);
541 regs_buff[19] = E1000_READ_REG(hw, E1000_TDFT);
542 regs_buff[20] = E1000_READ_REG(hw, E1000_TDFHS);
543 regs_buff[21] = E1000_READ_REG(hw, E1000_TDFPC);
544
545 sbuf_printf(sb, "General Registers\n");
546 sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
547 sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
 548 	sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]);
484
549
485 ((pci_subdevice_id == ent->subdevice_id) ||
486 (ent->subdevice_id == PCI_ANY_ID))) {
487 sprintf(adapter_name, "%s %s",
488 em_strings[ent->index],
489 em_driver_version);
490 device_set_desc_copy(dev, adapter_name);
491 return (BUS_PROBE_DEFAULT);
492 }
493 ent++;
550 sbuf_printf(sb, "Interrupt Registers\n");
551 sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);
552
553 sbuf_printf(sb, "RX Registers\n");
554 sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
555 sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
556 sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
557 sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
558 sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
559 sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
560 sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);
561
562 sbuf_printf(sb, "TX Registers\n");
563 sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
564 sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
565 sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
566 sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
567 sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
568 sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
569 sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
570 sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
571 sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
572 sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]);
573 sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]);
574
575#ifdef DUMP_DESCS
576 {
577 if_softc_ctx_t scctx = adapter->shared;
 	/* XXX rx_que/tx_que were undeclared here; assume the first queue pair */
 578 	struct rx_ring *rxr = &adapter->rx_queues[0].rxr;
 579 	struct tx_ring *txr = &adapter->tx_queues[0].txr;
580 int ntxd = scctx->isc_ntxd[0];
581 int nrxd = scctx->isc_nrxd[0];
582 int j;
583
584 for (j = 0; j < nrxd; j++) {
585 u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
586 u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
587 sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
494 }
495
588 }
589
496 return (ENXIO);
590 for (j = 0; j < min(ntxd, 256); j++) {
591 struct em_txbuffer *buf = &txr->tx_buffers[j];
592 unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
593
594 sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
595 j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
596 buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & E1000_TXD_STAT_DD : 0);
597
598 }
599 }
600#endif
601
 	free(regs_buff, M_DEVBUF);
 602 	rc = sbuf_finish(sb);
 603 	sbuf_delete(sb);
 604 	return (rc);
497}
498
605}
606
607static void *
608em_register(device_t dev)
609{
610 return (em_sctx);
611}
612
613static void *
614igb_register(device_t dev)
615{
616 return (igb_sctx);
617}
618
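/*
 * iflib invokes these via DEVMETHOD(device_register) before probe;
 * iflib_device_probe() then walks the isc_vendor_info table of the
 * returned if_shared_ctx to match the PCI IDs.
 */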
619static void
620em_init_tx_ring(struct em_tx_queue *que)
621{
622 struct adapter *sc = que->adapter;
623 if_softc_ctx_t scctx = sc->shared;
624 struct tx_ring *txr = &que->txr;
625 struct em_txbuffer *tx_buffer;
626
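	/*
	 * eop records the index of a packet's last descriptor so that
	 * completion can test its DD status bit; -1 marks a slot with
	 * nothing in flight (see the DUMP_DESCS dump above).
	 */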
627 tx_buffer = txr->tx_buffers;
628 for (int i = 0; i < scctx->isc_ntxd[0]; i++, tx_buffer++) {
629 tx_buffer->eop = -1;
630 }
631}
632
633static int
634em_set_num_queues(if_ctx_t ctx)
635{
636 struct adapter *adapter = iflib_get_softc(ctx);
637 int maxqueues;
638
639 /* Sanity check based on HW */
640 switch (adapter->hw.mac.type) {
641 case e1000_82576:
642 case e1000_82580:
643 case e1000_i350:
644 case e1000_i354:
645 maxqueues = 8;
646 break;
647 case e1000_i210:
648 case e1000_82575:
649 maxqueues = 4;
650 break;
651 case e1000_i211:
652 case e1000_82574:
653 maxqueues = 2;
654 break;
655 default:
656 maxqueues = 1;
657 break;
658 }
659
660 return (maxqueues);
661}
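/*
 * These caps mirror the hardware queue counts for each MAC (e.g. the
 * 82574 exposes two queue pairs); iflib may still configure fewer
 * queues, for instance when MSI-X vectors are scarce.
 */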
662
663
664#define EM_CAPS \
665 IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \
666 IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | \
 667 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_MTU
668
669#define IGB_CAPS \
670 IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \
671 IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
 672 IFCAP_VLAN_MTU | IFCAP_TXCSUM_IPV6 | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU
673
499/*********************************************************************
500 * Device initialization routine
501 *
502 * The attach entry point is called when the driver is being loaded.
503 * This routine identifies the type of hardware, allocates all resources
504 * and initializes the hardware.
505 *
506 * return 0 on success, positive on failure
507 *********************************************************************/
508
509static int
674/*********************************************************************
675 * Device initialization routine
676 *
677 * The attach entry point is called when the driver is being loaded.
678 * This routine identifies the type of hardware, allocates all resources
679 * and initializes the hardware.
680 *
681 * return 0 on success, positive on failure
682 *********************************************************************/
683
684static int
510em_attach(device_t dev)
685em_if_attach_pre(if_ctx_t ctx)
511{
686{
512 struct adapter *adapter;
687 struct adapter *adapter;
688 if_softc_ctx_t scctx;
689 device_t dev;
513 struct e1000_hw *hw;
514 int error = 0;
515
690 struct e1000_hw *hw;
691 int error = 0;
692
516 INIT_DEBUGOUT("em_attach: begin");
693 INIT_DEBUGOUT("em_if_attach_pre begin");
694 dev = iflib_get_dev(ctx);
695 adapter = iflib_get_softc(ctx);
517
518 if (resource_disabled("em", device_get_unit(dev))) {
519 device_printf(dev, "Disabled by device hint\n");
520 return (ENXIO);
521 }
522
696
697 if (resource_disabled("em", device_get_unit(dev))) {
698 device_printf(dev, "Disabled by device hint\n");
699 return (ENXIO);
700 }
701
523 adapter = device_get_softc(dev);
702 adapter->ctx = ctx;
524 adapter->dev = adapter->osdep.dev = dev;
703 adapter->dev = adapter->osdep.dev = dev;
525 hw = &adapter->hw;
526 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
704 scctx = adapter->shared = iflib_get_softc_ctx(ctx);
705 adapter->media = iflib_get_media(ctx);
706 hw = &adapter->hw;
527
707
708 adapter->tx_process_limit = scctx->isc_ntxd[0];
709
528 /* SYSCTL stuff */
529 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
530 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
531 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
532 em_sysctl_nvm_info, "I", "NVM Information");
533
534 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
535 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
536 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
537 em_sysctl_debug_info, "I", "Debug Information");
538
539 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
540 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
541 OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
542 em_set_flowcntl, "I", "Flow Control");
543
710 /* SYSCTL stuff */
711 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
712 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
713 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
714 em_sysctl_nvm_info, "I", "NVM Information");
715
716 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
717 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
718 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
719 em_sysctl_debug_info, "I", "Debug Information");
720
721 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
722 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
723 OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
724 em_set_flowcntl, "I", "Flow Control");
725
544 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
726 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
727 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
728 OID_AUTO, "reg_dump", CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
729 em_get_regs, "A", "Dump Registers");
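	/*
	 * The node registered above can then be read from userland,
	 * e.g. for unit 0 of an em device:  sysctl dev.em.0.reg_dump
	 */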
545
546 /* Determine hardware and mac info */
730
731 /* Determine hardware and mac info */
547 em_identify_hardware(adapter);
732 em_identify_hardware(ctx);
548
733
734 /* Set isc_msix_bar */
735 scctx->isc_msix_bar = PCIR_BAR(EM_MSIX_BAR);
736 scctx->isc_tx_nsegments = EM_MAX_SCATTER;
737 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
738 scctx->isc_tx_tso_size_max = EM_TSO_SIZE;
739 scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE;
740 scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = em_set_num_queues(ctx);
741 device_printf(dev, "attach_pre capping queues at %d\n", scctx->isc_ntxqsets_max);
742
743 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
744
745
746 if (adapter->hw.mac.type >= igb_mac_min) {
747 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
748 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
749 scctx->isc_txrx = &igb_txrx;
750 scctx->isc_capenable = IGB_CAPS;
751 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP \
 752 		    | CSUM_IP6_UDP;
753 if (adapter->hw.mac.type != e1000_82575)
754 scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP;
755
756 } else if (adapter->hw.mac.type >= em_mac_min) {
757 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
758 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
759 scctx->isc_txrx = &em_txrx;
760 scctx->isc_capenable = EM_CAPS;
761 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
762 } else {
763 scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
764 scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
765 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
766 scctx->isc_txrx = &lem_txrx;
767 scctx->isc_capenable = EM_CAPS;
768 if (adapter->hw.mac.type < e1000_82543)
769 scctx->isc_capenable &= ~(IFCAP_HWCSUM|IFCAP_VLAN_HWCSUM);
770 scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO;
771 scctx->isc_msix_bar = 0;
772 }
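/*
 * NB: the three branches above select a descriptor scheme per MAC
 * generation: igb-class parts (>= igb_mac_min) use the advanced
 * descriptor format, em-class parts (>= em_mac_min) use extended
 * descriptors, and older lem-class parts keep the legacy layout,
 * which is why each branch installs its own isc_txrx ops vector.
 */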
773
549 /* Setup PCI resources */
774 /* Setup PCI resources */
550 if (em_allocate_pci_resources(adapter)) {
775 if (em_allocate_pci_resources(ctx)) {
551 device_printf(dev, "Allocation of PCI resources failed\n");
552 error = ENXIO;
553 goto err_pci;
554 }
555
556 /*
557 ** For ICH8 and family we need to
558 ** map the flash memory, and this

--- 39 unchanged lines hidden ---

598 error = e1000_setup_init_funcs(hw, TRUE);
599 if (error) {
600 device_printf(dev, "Setup of Shared code failed, error %d\n",
601 error);
602 error = ENXIO;
603 goto err_pci;
604 }
605
776 device_printf(dev, "Allocation of PCI resources failed\n");
777 error = ENXIO;
778 goto err_pci;
779 }
780
781 /*
782 ** For ICH8 and family we need to
783 ** map the flash memory, and this

--- 39 unchanged lines hidden ---

823 error = e1000_setup_init_funcs(hw, TRUE);
824 if (error) {
825 device_printf(dev, "Setup of Shared code failed, error %d\n",
826 error);
827 error = ENXIO;
828 goto err_pci;
829 }
830
606 /*
607 * Setup MSI/X or MSI if PCI Express
608 */
609 adapter->msix = em_setup_msix(adapter);
610
831 em_setup_msix(ctx);
611 e1000_get_bus_info(hw);
612
613 /* Set up some sysctls for the tunable interrupt delays */
614 em_add_int_delay_sysctl(adapter, "rx_int_delay",
615 "receive interrupt delay in usecs", &adapter->rx_int_delay,
616 E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
617 em_add_int_delay_sysctl(adapter, "tx_int_delay",
618 "transmit interrupt delay in usecs", &adapter->tx_int_delay,

--- 13 unchanged lines hidden ---

632 &adapter->tx_itr,
633 E1000_REGISTER(hw, E1000_ITR),
634 DEFAULT_ITR);
635
636 /* Sysctl for limiting the amount of work done in the taskqueue */
637 em_set_sysctl_value(adapter, "rx_processing_limit",
638 "max number of rx packets to process", &adapter->rx_process_limit,
639 em_rx_process_limit);
832 e1000_get_bus_info(hw);
833
834 /* Set up some sysctls for the tunable interrupt delays */
835 em_add_int_delay_sysctl(adapter, "rx_int_delay",
836 "receive interrupt delay in usecs", &adapter->rx_int_delay,
837 E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
838 em_add_int_delay_sysctl(adapter, "tx_int_delay",
839 "transmit interrupt delay in usecs", &adapter->tx_int_delay,

--- 13 unchanged lines hidden ---

853 &adapter->tx_itr,
854 E1000_REGISTER(hw, E1000_ITR),
855 DEFAULT_ITR);
856
857 /* Sysctl for limiting the amount of work done in the taskqueue */
858 em_set_sysctl_value(adapter, "rx_processing_limit",
859 "max number of rx packets to process", &adapter->rx_process_limit,
860 em_rx_process_limit);
640
641 /*
 642 * Validate number of transmit and receive descriptors. The
 643 * ring size in bytes must be a multiple of EM_DBA_ALIGN, and
 644 * the count must not exceed the hardware maximum.
645 */
646 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
647 (em_txd > EM_MAX_TXD) || (em_txd < EM_MIN_TXD)) {
648 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
649 EM_DEFAULT_TXD, em_txd);
650 adapter->num_tx_desc = EM_DEFAULT_TXD;
651 } else
652 adapter->num_tx_desc = em_txd;
653
654 if (((em_rxd * sizeof(union e1000_rx_desc_extended)) % EM_DBA_ALIGN) != 0 ||
655 (em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) {
656 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
657 EM_DEFAULT_RXD, em_rxd);
658 adapter->num_rx_desc = EM_DEFAULT_RXD;
659 } else
660 adapter->num_rx_desc = em_rxd;
661
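/*
 * Worked example for the checks above: a legacy e1000_tx_desc is
 * 16 bytes, so with EM_DBA_ALIGN (128 in if_em.h) any descriptor
 * count that is a multiple of 8, the default EM_DEFAULT_TXD of
 * 1024 included, passes the alignment test.
 */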
861
662 hw->mac.autoneg = DO_AUTO_NEG;
663 hw->phy.autoneg_wait_to_complete = FALSE;
664 hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
665
862 hw->mac.autoneg = DO_AUTO_NEG;
863 hw->phy.autoneg_wait_to_complete = FALSE;
864 hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
865
866 if (adapter->hw.mac.type < em_mac_min) {
867 e1000_init_script_state_82541(&adapter->hw, TRUE);
868 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
869 }
666 /* Copper options */
667 if (hw->phy.media_type == e1000_media_type_copper) {
668 hw->phy.mdix = AUTO_ALL_MODES;
669 hw->phy.disable_polarity_correction = FALSE;
670 hw->phy.ms_type = EM_MASTER_SLAVE;
671 }
672
673 /*

--- 4 unchanged lines hidden ---

678 ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
679
680 /*
681 * This controls when hardware reports transmit completion
682 * status.
683 */
684 hw->mac.report_tx_early = 1;
685
870 /* Copper options */
871 if (hw->phy.media_type == e1000_media_type_copper) {
872 hw->phy.mdix = AUTO_ALL_MODES;
873 hw->phy.disable_polarity_correction = FALSE;
874 hw->phy.ms_type = EM_MASTER_SLAVE;
875 }
876
877 /*

--- 4 unchanged lines hidden ---

882 ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
883
884 /*
885 * This controls when hardware reports transmit completion
886 * status.
887 */
888 hw->mac.report_tx_early = 1;
889
686 /*
687 ** Get queue/ring memory
688 */
689 if (em_allocate_queues(adapter)) {
690 error = ENOMEM;
691 goto err_pci;
692 }
693
694 /* Allocate multicast array memory. */
695 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
696 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
697 if (adapter->mta == NULL) {
698 device_printf(dev, "Can not allocate multicast setup array\n");
699 error = ENOMEM;
700 goto err_late;
701 }

--- 13 unchanged lines hidden ---

715
716 /*
 717 ** Start from a known state; this is
 718 ** important for reading the NVM and
 719 ** MAC address from it.
720 */
721 e1000_reset_hw(hw);
722
890 /* Allocate multicast array memory. */
891 adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
892 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
893 if (adapter->mta == NULL) {
894 device_printf(dev, "Can not allocate multicast setup array\n");
895 error = ENOMEM;
896 goto err_late;
897 }

--- 13 unchanged lines hidden ---

911
912 /*
 913 ** Start from a known state; this is
 914 ** important for reading the NVM and
 915 ** MAC address from it.
916 */
917 e1000_reset_hw(hw);
918
723
724 /* Make sure we have a good EEPROM before we read from it */
725 if (e1000_validate_nvm_checksum(hw) < 0) {
726 /*
 727 ** Some PCI-E parts fail the first check due to
 728 ** the link being in sleep state; call it again,
 729 ** and if it fails a second time it's a real issue.
730 */
731 if (e1000_validate_nvm_checksum(hw) < 0) {

--- 16 unchanged lines hidden ---

748 device_printf(dev, "Invalid MAC address\n");
749 error = EIO;
750 goto err_late;
751 }
752
753 /* Disable ULP support */
754 e1000_disable_ulp_lpt_lp(hw, TRUE);
755
919 /* Make sure we have a good EEPROM before we read from it */
920 if (e1000_validate_nvm_checksum(hw) < 0) {
921 /*
 922 ** Some PCI-E parts fail the first check due to
 923 ** the link being in sleep state; call it again,
 924 ** and if it fails a second time it's a real issue.
925 */
926 if (e1000_validate_nvm_checksum(hw) < 0) {

--- 16 unchanged lines hidden ---

943 device_printf(dev, "Invalid MAC address\n");
944 error = EIO;
945 goto err_late;
946 }
947
948 /* Disable ULP support */
949 e1000_disable_ulp_lpt_lp(hw, TRUE);
950
756 /*
757 ** Do interrupt configuration
758 */
759 if (adapter->msix > 1) /* Do MSIX */
760 error = em_allocate_msix(adapter);
761 else /* MSI or Legacy */
762 error = em_allocate_legacy(adapter);
763 if (error)
764 goto err_late;
765
766 /*
951 /*
767 * Get Wake-on-Lan and Management info for later use
768 */
952 * Get Wake-on-Lan and Management info for later use
953 */
769 em_get_wakeup(dev);
954 em_get_wakeup(ctx);
770
955
956 iflib_set_mac(ctx, hw->mac.addr);
957
958 return (0);
959
960err_late:
961 em_release_hw_control(adapter);
962err_pci:
963 em_free_pci_resources(ctx);
964 free(adapter->mta, M_DEVBUF);
965
966 return (error);
967}
968
969static int
970em_if_attach_post(if_ctx_t ctx)
971{
972 struct adapter *adapter = iflib_get_softc(ctx);
973 struct e1000_hw *hw = &adapter->hw;
974 int error = 0;
975
771 /* Setup OS specific network interface */
976 /* Setup OS specific network interface */
772 if (em_setup_interface(dev, adapter) != 0)
977 error = em_setup_interface(ctx);
978 if (error != 0) {
773 goto err_late;
979 goto err_late;
980 }
774
981
775 em_reset(adapter);
982 em_reset(ctx);
776
777 /* Initialize statistics */
778 em_update_stats_counters(adapter);
983
984 /* Initialize statistics */
985 em_update_stats_counters(adapter);
779
780 hw->mac.get_link_status = 1;
986 hw->mac.get_link_status = 1;
781 em_update_link_status(adapter);
782
783 /* Register for VLAN events */
784 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
785 em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
786 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
787 em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
788
987 em_if_update_admin_status(ctx);
789 em_add_hw_stats(adapter);
790
791 /* Non-AMT based hardware can now take control from firmware */
792 if (adapter->has_manage && !adapter->has_amt)
793 em_get_hw_control(adapter);
988 em_add_hw_stats(adapter);
989
990 /* Non-AMT based hardware can now take control from firmware */
991 if (adapter->has_manage && !adapter->has_amt)
992 em_get_hw_control(adapter);
993
994 INIT_DEBUGOUT("em_if_attach_post: end");
794
995
795 /* Tell the stack that the interface is not active */
796 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
996 return (error);
797
997
798 adapter->led_dev = led_create(em_led_func, adapter,
799 device_get_nameunit(dev));
800#ifdef DEV_NETMAP
801 em_netmap_attach(adapter);
802#endif /* DEV_NETMAP */
803
804 INIT_DEBUGOUT("em_attach: end");
805
806 return (0);
807
808err_late:
998err_late:
809 em_free_transmit_structures(adapter);
810 em_free_receive_structures(adapter);
811 em_release_hw_control(adapter);
999 em_release_hw_control(adapter);
812 if (adapter->ifp != (void *)NULL)
813 if_free(adapter->ifp);
814err_pci:
815 em_free_pci_resources(adapter);
1000 em_free_pci_resources(ctx);
1001 em_if_queues_free(ctx);
816 free(adapter->mta, M_DEVBUF);
1002 free(adapter->mta, M_DEVBUF);
817 EM_CORE_LOCK_DESTROY(adapter);
818
819 return (error);
820}
821
822/*********************************************************************
823 * Device removal routine
824 *
825 * The detach entry point is called when the driver is being removed.
826 * This routine stops the adapter and deallocates all the resources
827 * that were allocated for driver operation.
828 *
829 * return 0 on success, positive on failure
830 *********************************************************************/
831
832static int
1003
1004 return (error);
1005}
1006
1007/*********************************************************************
1008 * Device removal routine
1009 *
1010 * The detach entry point is called when the driver is being removed.
1011 * This routine stops the adapter and deallocates all the resources
1012 * that were allocated for driver operation.
1013 *
1014 * return 0 on success, positive on failure
1015 *********************************************************************/
1016
1017static int
833em_detach(device_t dev)
1018em_if_detach(if_ctx_t ctx)
834{
1019{
835 struct adapter *adapter = device_get_softc(dev);
836 if_t ifp = adapter->ifp;
1020 struct adapter *adapter = iflib_get_softc(ctx);
837
838 INIT_DEBUGOUT("em_detach: begin");
839
1021
 1022 INIT_DEBUGOUT("em_if_detach: begin");
1023
840 /* Make sure VLANS are not using driver */
841 if (if_vlantrunkinuse(ifp)) {
842 device_printf(dev,"Vlan in use, detach first\n");
843 return (EBUSY);
844 }
845
846#ifdef DEVICE_POLLING
847 if (if_getcapenable(ifp) & IFCAP_POLLING)
848 ether_poll_deregister(ifp);
849#endif
850
851 if (adapter->led_dev != NULL)
852 led_destroy(adapter->led_dev);
853
854 EM_CORE_LOCK(adapter);
855 adapter->in_detach = 1;
856 em_stop(adapter);
857 EM_CORE_UNLOCK(adapter);
858 EM_CORE_LOCK_DESTROY(adapter);
859
860 e1000_phy_hw_reset(&adapter->hw);
861
862 em_release_manageability(adapter);
863 em_release_hw_control(adapter);
1024 e1000_phy_hw_reset(&adapter->hw);
1025
1026 em_release_manageability(adapter);
1027 em_release_hw_control(adapter);
1028 em_free_pci_resources(ctx);
864
1029
865 /* Unregister VLAN events */
866 if (adapter->vlan_attach != NULL)
867 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
868 if (adapter->vlan_detach != NULL)
869 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
870
871 ether_ifdetach(adapter->ifp);
872 callout_drain(&adapter->timer);
873
874#ifdef DEV_NETMAP
875 netmap_detach(ifp);
876#endif /* DEV_NETMAP */
877
878 em_free_pci_resources(adapter);
879 bus_generic_detach(dev);
880 if_free(ifp);
881
882 em_free_transmit_structures(adapter);
883 em_free_receive_structures(adapter);
884
885 em_release_hw_control(adapter);
886 free(adapter->mta, M_DEVBUF);
887
888 return (0);
889}
890
891/*********************************************************************
892 *
893 * Shutdown entry point
894 *
895 **********************************************************************/
896
897static int
1030 return (0);
1031}
1032
1033/*********************************************************************
1034 *
1035 * Shutdown entry point
1036 *
1037 **********************************************************************/
1038
1039static int
898em_shutdown(device_t dev)
1040em_if_shutdown(if_ctx_t ctx)
899{
1041{
900 return em_suspend(dev);
1042 return em_if_suspend(ctx);
901}
902
903/*
904 * Suspend/resume device methods.
905 */
906static int
1043}
1044
1045/*
1046 * Suspend/resume device methods.
1047 */
1048static int
907em_suspend(device_t dev)
1049em_if_suspend(if_ctx_t ctx)
908{
1050{
909 struct adapter *adapter = device_get_softc(dev);
1051 struct adapter *adapter = iflib_get_softc(ctx);
910
1052
911 EM_CORE_LOCK(adapter);
912
913 em_release_manageability(adapter);
914 em_release_hw_control(adapter);
1053 em_release_manageability(adapter);
1054 em_release_hw_control(adapter);
915 em_enable_wakeup(dev);
916
917 EM_CORE_UNLOCK(adapter);
918
919 return bus_generic_suspend(dev);
1055 em_enable_wakeup(ctx);
1056 return (0);
920}
921
922static int
1057}
1058
1059static int
923em_resume(device_t dev)
1060em_if_resume(if_ctx_t ctx)
924{
1061{
925 struct adapter *adapter = device_get_softc(dev);
926 struct tx_ring *txr = adapter->tx_rings;
927 if_t ifp = adapter->ifp;
1062 struct adapter *adapter = iflib_get_softc(ctx);
928
1063
929 EM_CORE_LOCK(adapter);
930 if (adapter->hw.mac.type == e1000_pch2lan)
931 e1000_resume_workarounds_pchlan(&adapter->hw);
1064 if (adapter->hw.mac.type == e1000_pch2lan)
1065 e1000_resume_workarounds_pchlan(&adapter->hw);
932 em_init_locked(adapter);
1066 em_if_init(ctx);
933 em_init_manageability(adapter);
934
1067 em_init_manageability(adapter);
1068
935 if ((if_getflags(ifp) & IFF_UP) &&
936 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) && adapter->link_active) {
937 for (int i = 0; i < adapter->num_queues; i++, txr++) {
938 EM_TX_LOCK(txr);
939#ifdef EM_MULTIQUEUE
940 if (!drbr_empty(ifp, txr->br))
941 em_mq_start_locked(ifp, txr);
942#else
943 if (!if_sendq_empty(ifp))
944 em_start_locked(ifp, txr);
945#endif
946 EM_TX_UNLOCK(txr);
947 }
948 }
949 EM_CORE_UNLOCK(adapter);
950
951 return bus_generic_resume(dev);
 1069 return (0);
952}
953
1070}
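/*
 * NB: under iflib the shutdown/suspend/resume methods return 0 and
 * leave the bus_generic_suspend()/bus_generic_resume() calls to the
 * iflib device glue, so the locked restart choreography of the old
 * em_resume() is no longer needed here.
 */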
1071
954
955#ifndef EM_MULTIQUEUE
956static void
957em_start_locked(if_t ifp, struct tx_ring *txr)
958{
959 struct adapter *adapter = if_getsoftc(ifp);
960 struct mbuf *m_head;
961
962 EM_TX_LOCK_ASSERT(txr);
963
964 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
965 IFF_DRV_RUNNING)
966 return;
967
968 if (!adapter->link_active)
969 return;
970
971 while (!if_sendq_empty(ifp)) {
972 /* Call cleanup if number of TX descriptors low */
973 if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
974 em_txeof(txr);
975 if (txr->tx_avail < EM_MAX_SCATTER) {
976 if_setdrvflagbits(ifp,IFF_DRV_OACTIVE, 0);
977 break;
978 }
979 m_head = if_dequeue(ifp);
980 if (m_head == NULL)
981 break;
982 /*
 983 * Encapsulation can modify our pointer, and/or make it
984 * NULL on failure. In that event, we can't requeue.
985 */
986 if (em_xmit(txr, &m_head)) {
987 if (m_head == NULL)
988 break;
989 if_sendq_prepend(ifp, m_head);
990 break;
991 }
992
993 /* Mark the queue as having work */
994 if (txr->busy == EM_TX_IDLE)
995 txr->busy = EM_TX_BUSY;
996
997 /* Send a copy of the frame to the BPF listener */
998 ETHER_BPF_MTAP(ifp, m_head);
999
1000 }
1001
1002 return;
1003}
1004
1005static void
1006em_start(if_t ifp)
1007{
1008 struct adapter *adapter = if_getsoftc(ifp);
1009 struct tx_ring *txr = adapter->tx_rings;
1010
1011 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1012 EM_TX_LOCK(txr);
1013 em_start_locked(ifp, txr);
1014 EM_TX_UNLOCK(txr);
1015 }
1016 return;
1017}
1018#else /* EM_MULTIQUEUE */
1019/*********************************************************************
1020 * Multiqueue Transmit routines
1021 *
1022 * em_mq_start is called by the stack to initiate a transmit.
 1023 * However, if busy, the driver can queue the request rather
 1024 * than do an immediate send. It is this deferral, more than
 1025 * the multiple tx queues themselves, that is the advantage here.
1026 **********************************************************************/
1027/*
1028** Multiqueue capable stack interface
1029*/
1030static int
1072static int
1031em_mq_start(if_t ifp, struct mbuf *m)
1073em_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1032{
1074{
1033 struct adapter *adapter = if_getsoftc(ifp);
1034 struct tx_ring *txr = adapter->tx_rings;
1035 unsigned int i, error;
1036
1037 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1038 i = m->m_pkthdr.flowid % adapter->num_queues;
1039 else
1040 i = curcpu % adapter->num_queues;
1041
1042 txr = &adapter->tx_rings[i];
1043
1044 error = drbr_enqueue(ifp, txr->br, m);
1045 if (error)
1046 return (error);
1047
1048 if (EM_TX_TRYLOCK(txr)) {
1049 em_mq_start_locked(ifp, txr);
1050 EM_TX_UNLOCK(txr);
1051 } else
1052 taskqueue_enqueue(txr->tq, &txr->tx_task);
1053
1054 return (0);
1075 int max_frame_size;
1076 struct adapter *adapter = iflib_get_softc(ctx);
1078
1079 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1080
1081 switch (adapter->hw.mac.type) {
1082 case e1000_82571:
1083 case e1000_82572:
1084 case e1000_ich9lan:
1085 case e1000_ich10lan:
1086 case e1000_pch2lan:
1087 case e1000_pch_lpt:
1088 case e1000_pch_spt:
1089 case e1000_82574:
1090 case e1000_82583:
1091 case e1000_80003es2lan: /* 9K Jumbo Frame size */
1092 max_frame_size = 9234;
1093 break;
1094 case e1000_pchlan:
1095 max_frame_size = 4096;
1096 break;
1097 /* Adapters that do not support jumbo frames */
1098 case e1000_ich8lan:
1099 max_frame_size = ETHER_MAX_LEN;
1100 break;
1101 default:
1102 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1103 }
1104 if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
1105 return (EINVAL);
1106 }
1107
 1108 adapter->hw.mac.max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1109 return (0);
1055}
1056
1110}
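/*
 * Worked example for em_if_mtu_set(): a 9K-jumbo part such as the
 * 82571 has max_frame_size 9234, so the largest MTU accepted is
 * 9234 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9216, while an
 * ich8lan part is capped at ETHER_MAX_LEN (1518), i.e. the stock
 * 1500-byte MTU.
 */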
1111
1057static int
1058em_mq_start_locked(if_t ifp, struct tx_ring *txr)
1059{
1060 struct adapter *adapter = txr->adapter;
1061 struct mbuf *next;
1062 int err = 0, enq = 0;
1063
1064 EM_TX_LOCK_ASSERT(txr);
1065
1066 if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
1067 adapter->link_active == 0) {
1068 return (ENETDOWN);
1069 }
1070
1071 /* Process the queue */
1072 while ((next = drbr_peek(ifp, txr->br)) != NULL) {
1073 if ((err = em_xmit(txr, &next)) != 0) {
1074 if (next == NULL) {
1075 /* It was freed, move forward */
1076 drbr_advance(ifp, txr->br);
1077 } else {
1078 /*
1079 * Still have one left, it may not be
1080 * the same since the transmit function
1081 * may have changed it.
1082 */
1083 drbr_putback(ifp, txr->br, next);
1084 }
1085 break;
1086 }
1087 drbr_advance(ifp, txr->br);
1088 enq++;
1089 if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
1090 if (next->m_flags & M_MCAST)
1091 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
1092 ETHER_BPF_MTAP(ifp, next);
1093 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1094 break;
1095 }
1096
1097 /* Mark the queue as having work */
1098 if ((enq > 0) && (txr->busy == EM_TX_IDLE))
1099 txr->busy = EM_TX_BUSY;
1100
1101 if (txr->tx_avail < EM_MAX_SCATTER)
1102 em_txeof(txr);
1103 if (txr->tx_avail < EM_MAX_SCATTER) {
1104 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE,0);
1105 }
1106 return (err);
1107}
1108
1109/*
1110** Flush all ring buffers
1111*/
1112static void
1113em_qflush(if_t ifp)
1114{
1115 struct adapter *adapter = if_getsoftc(ifp);
1116 struct tx_ring *txr = adapter->tx_rings;
1117 struct mbuf *m;
1118
1119 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1120 EM_TX_LOCK(txr);
1121 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
1122 m_freem(m);
1123 EM_TX_UNLOCK(txr);
1124 }
1125 if_qflush(ifp);
1126}
1127#endif /* EM_MULTIQUEUE */
1128
1129/*********************************************************************
1112/*********************************************************************
1130 * Ioctl entry point
1131 *
1132 * em_ioctl is called when the user wants to configure the
1133 * interface.
1134 *
1135 * return 0 on success, positive on failure
1136 **********************************************************************/
1137
1138static int
1139em_ioctl(if_t ifp, u_long command, caddr_t data)
1140{
1141 struct adapter *adapter = if_getsoftc(ifp);
1142 struct ifreq *ifr = (struct ifreq *)data;
1143#if defined(INET) || defined(INET6)
1144 struct ifaddr *ifa = (struct ifaddr *)data;
1145#endif
1146 bool avoid_reset = FALSE;
1147 int error = 0;
1148
1149 if (adapter->in_detach)
1150 return (error);
1151
1152 switch (command) {
1153 case SIOCSIFADDR:
1154#ifdef INET
1155 if (ifa->ifa_addr->sa_family == AF_INET)
1156 avoid_reset = TRUE;
1157#endif
1158#ifdef INET6
1159 if (ifa->ifa_addr->sa_family == AF_INET6)
1160 avoid_reset = TRUE;
1161#endif
1162 /*
1163 ** Calling init results in link renegotiation,
1164 ** so we avoid doing it when possible.
1165 */
1166 if (avoid_reset) {
1167 if_setflagbits(ifp,IFF_UP,0);
1168 if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING))
1169 em_init(adapter);
1170#ifdef INET
1171 if (!(if_getflags(ifp) & IFF_NOARP))
1172 arp_ifinit(ifp, ifa);
1173#endif
1174 } else
1175 error = ether_ioctl(ifp, command, data);
1176 break;
1177 case SIOCSIFMTU:
1178 {
1179 int max_frame_size;
1180
1181 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1182
1183 EM_CORE_LOCK(adapter);
1184 switch (adapter->hw.mac.type) {
1185 case e1000_82571:
1186 case e1000_82572:
1187 case e1000_ich9lan:
1188 case e1000_ich10lan:
1189 case e1000_pch2lan:
1190 case e1000_pch_lpt:
1191 case e1000_pch_spt:
1192 case e1000_82574:
1193 case e1000_82583:
1194 case e1000_80003es2lan: /* 9K Jumbo Frame size */
1195 max_frame_size = 9234;
1196 break;
1197 case e1000_pchlan:
1198 max_frame_size = 4096;
1199 break;
1200 /* Adapters that do not support jumbo frames */
1201 case e1000_ich8lan:
1202 max_frame_size = ETHER_MAX_LEN;
1203 break;
1204 default:
1205 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1206 }
1207 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1208 ETHER_CRC_LEN) {
1209 EM_CORE_UNLOCK(adapter);
1210 error = EINVAL;
1211 break;
1212 }
1213
1214 if_setmtu(ifp, ifr->ifr_mtu);
1215 adapter->hw.mac.max_frame_size =
1216 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
1217 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1218 em_init_locked(adapter);
1219 EM_CORE_UNLOCK(adapter);
1220 break;
1221 }
1222 case SIOCSIFFLAGS:
1223 IOCTL_DEBUGOUT("ioctl rcv'd:\
1224 SIOCSIFFLAGS (Set Interface Flags)");
1225 EM_CORE_LOCK(adapter);
1226 if (if_getflags(ifp) & IFF_UP) {
1227 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1228 if ((if_getflags(ifp) ^ adapter->if_flags) &
1229 (IFF_PROMISC | IFF_ALLMULTI)) {
1230 em_disable_promisc(adapter);
1231 em_set_promisc(adapter);
1232 }
1233 } else
1234 em_init_locked(adapter);
1235 } else
1236 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1237 em_stop(adapter);
1238 adapter->if_flags = if_getflags(ifp);
1239 EM_CORE_UNLOCK(adapter);
1240 break;
1241 case SIOCADDMULTI:
1242 case SIOCDELMULTI:
1243 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1244 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1245 EM_CORE_LOCK(adapter);
1246 em_disable_intr(adapter);
1247 em_set_multi(adapter);
1248#ifdef DEVICE_POLLING
1249 if (!(if_getcapenable(ifp) & IFCAP_POLLING))
1250#endif
1251 em_enable_intr(adapter);
1252 EM_CORE_UNLOCK(adapter);
1253 }
1254 break;
1255 case SIOCSIFMEDIA:
1256 /* Check SOL/IDER usage */
1257 EM_CORE_LOCK(adapter);
1258 if (e1000_check_reset_block(&adapter->hw)) {
1259 EM_CORE_UNLOCK(adapter);
1260 device_printf(adapter->dev, "Media change is"
1261 " blocked due to SOL/IDER session.\n");
1262 break;
1263 }
1264 EM_CORE_UNLOCK(adapter);
1265 /* falls thru */
1266 case SIOCGIFMEDIA:
1267 IOCTL_DEBUGOUT("ioctl rcv'd: \
1268 SIOCxIFMEDIA (Get/Set Interface Media)");
1269 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1270 break;
1271 case SIOCSIFCAP:
1272 {
1273 int mask, reinit;
1274
1275 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1276 reinit = 0;
1277 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1278#ifdef DEVICE_POLLING
1279 if (mask & IFCAP_POLLING) {
1280 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1281 error = ether_poll_register(em_poll, ifp);
1282 if (error)
1283 return (error);
1284 EM_CORE_LOCK(adapter);
1285 em_disable_intr(adapter);
1286 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1287 EM_CORE_UNLOCK(adapter);
1288 } else {
1289 error = ether_poll_deregister(ifp);
1290 /* Enable interrupt even in error case */
1291 EM_CORE_LOCK(adapter);
1292 em_enable_intr(adapter);
1293 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1294 EM_CORE_UNLOCK(adapter);
1295 }
1296 }
1297#endif
1298 if (mask & IFCAP_HWCSUM) {
1299 if_togglecapenable(ifp,IFCAP_HWCSUM);
1300 reinit = 1;
1301 }
1302 if (mask & IFCAP_TSO4) {
1303 if_togglecapenable(ifp,IFCAP_TSO4);
1304 reinit = 1;
1305 }
1306 if (mask & IFCAP_VLAN_HWTAGGING) {
1307 if_togglecapenable(ifp,IFCAP_VLAN_HWTAGGING);
1308 reinit = 1;
1309 }
1310 if (mask & IFCAP_VLAN_HWFILTER) {
1311 if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
1312 reinit = 1;
1313 }
1314 if (mask & IFCAP_VLAN_HWTSO) {
1315 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
1316 reinit = 1;
1317 }
1318 if ((mask & IFCAP_WOL) &&
1319 (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
1320 if (mask & IFCAP_WOL_MCAST)
1321 if_togglecapenable(ifp, IFCAP_WOL_MCAST);
1322 if (mask & IFCAP_WOL_MAGIC)
1323 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1324 }
1325 if (reinit && (if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1326 em_init(adapter);
1327 if_vlancap(ifp);
1328 break;
1329 }
1330
1331 default:
1332 error = ether_ioctl(ifp, command, data);
1333 break;
1334 }
1335
1336 return (error);
1337}
1338
1339
1340/*********************************************************************
1341 * Init entry point
1342 *
1343 * This routine is used in two ways. It is used by the stack as
 1344 * the init entry point in the network interface structure. It is also used
1345 * by the driver as a hw/sw initialization routine to get to a
1346 * consistent state.
1347 *
1348 * return 0 on success, positive on failure
1349 **********************************************************************/
1350
1351static void
1113 * Init entry point
1114 *
1115 * This routine is used in two ways. It is used by the stack as
 1116 * the init entry point in the network interface structure. It is also used
1117 * by the driver as a hw/sw initialization routine to get to a
1118 * consistent state.
1119 *
1120 * return 0 on success, positive on failure
1121 **********************************************************************/
1122
1123static void
1352em_init_locked(struct adapter *adapter)
1124em_if_init(if_ctx_t ctx)
1353{
1125{
1354 if_t ifp = adapter->ifp;
1355 device_t dev = adapter->dev;
1126 struct adapter *adapter = iflib_get_softc(ctx);
1127 struct ifnet *ifp = iflib_get_ifp(ctx);
1356
1128
1357 INIT_DEBUGOUT("em_init: begin");
1129 INIT_DEBUGOUT("em_if_init: begin");
1358
1130
1359 EM_CORE_LOCK_ASSERT(adapter);
1360
1361 em_disable_intr(adapter);
1362 callout_stop(&adapter->timer);
1363
 1364 /* Get the latest MAC address; the user may have set a LAA */
 1131 /* Get the latest MAC address; the user may have set a LAA */
1365 bcopy(if_getlladdr(adapter->ifp), adapter->hw.mac.addr,
1132 bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
1366 ETHER_ADDR_LEN);
1367
1368 /* Put the address into the Receive Address Array */
1369 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1370
1371 /*
1372 * With the 82571 adapter, RAR[0] may be overwritten
1373 * when the other port is reset, we make a duplicate
1374 * in RAR[14] for that eventuality, this assures
1375 * the interface continues to function.
1376 */
1377 if (adapter->hw.mac.type == e1000_82571) {
1378 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1379 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1380 E1000_RAR_ENTRIES - 1);
1381 }
1382
1383 /* Initialize the hardware */
1133 ETHER_ADDR_LEN);
1134
1135 /* Put the address into the Receive Address Array */
1136 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1137
1138 /*
1139 * With the 82571 adapter, RAR[0] may be overwritten
1140 * when the other port is reset, we make a duplicate
1141 * in RAR[14] for that eventuality, this assures
1142 * the interface continues to function.
1143 */
1144 if (adapter->hw.mac.type == e1000_82571) {
1145 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1146 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1147 E1000_RAR_ENTRIES - 1);
1148 }
1149
1150 /* Initialize the hardware */
1384 em_reset(adapter);
1385 em_update_link_status(adapter);
1151 em_reset(ctx);
1152 em_if_update_admin_status(ctx);
1386
1387 /* Setup VLAN support, basic and offload if available */
1388 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1389
1153
1154 /* Setup VLAN support, basic and offload if available */
1155 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1156
1390 /* Set hardware offload abilities */
1391 if_clearhwassist(ifp);
1392 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1393 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
1157 /* Clear bad data from Rx FIFOs */
1158 if (adapter->hw.mac.type >= igb_mac_min)
1159 e1000_rx_fifo_flush_82575(&adapter->hw);
1394
1160
1395 if (if_getcapenable(ifp) & IFCAP_TSO4)
1396 if_sethwassistbits(ifp, CSUM_TSO, 0);
1397
1398 /* Configure for OS presence */
1399 em_init_manageability(adapter);
1400
1401 /* Prepare transmit descriptors and buffers */
1161 /* Configure for OS presence */
1162 em_init_manageability(adapter);
1163
1164 /* Prepare transmit descriptors and buffers */
1402 em_setup_transmit_structures(adapter);
1403 em_initialize_transmit_unit(adapter);
1165 em_initialize_transmit_unit(ctx);
1404
1405 /* Setup Multicast table */
1166
1167 /* Setup Multicast table */
1406 em_set_multi(adapter);
1168 em_if_multi_set(ctx);
1407
1408 /*
1409 ** Figure out the desired mbuf
1410 ** pool for doing jumbos
1411 */
1412 if (adapter->hw.mac.max_frame_size <= 2048)
1413 adapter->rx_mbuf_sz = MCLBYTES;
1414 else if (adapter->hw.mac.max_frame_size <= 4096)
1415 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1416 else
1417 adapter->rx_mbuf_sz = MJUM9BYTES;
1418
1169
1170 /*
1171 ** Figure out the desired mbuf
1172 ** pool for doing jumbos
1173 */
1174 if (adapter->hw.mac.max_frame_size <= 2048)
1175 adapter->rx_mbuf_sz = MCLBYTES;
1176 else if (adapter->hw.mac.max_frame_size <= 4096)
1177 adapter->rx_mbuf_sz = MJUMPAGESIZE;
1178 else
1179 adapter->rx_mbuf_sz = MJUM9BYTES;
1180
1419 /* Prepare receive descriptors and buffers */
1420 if (em_setup_receive_structures(adapter)) {
1421 device_printf(dev, "Could not setup receive structures\n");
1422 em_stop(adapter);
1423 return;
1424 }
1425 em_initialize_receive_unit(adapter);
1181 em_initialize_receive_unit(ctx);
1426
1427 /* Use real VLAN Filter support? */
1428 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1429 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
1430 /* Use real VLAN Filter support */
1431 em_setup_vlan_hw_support(adapter);
1432 else {
1433 u32 ctrl;
1434 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1435 ctrl |= E1000_CTRL_VME;
1436 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1437 }
1438 }
1439
1440 /* Don't lose promiscuous settings */
1182
1183 /* Use real VLAN Filter support? */
1184 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1185 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
1186 /* Use real VLAN Filter support */
1187 em_setup_vlan_hw_support(adapter);
1188 else {
1189 u32 ctrl;
1190 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1191 ctrl |= E1000_CTRL_VME;
1192 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1193 }
1194 }
1195
1196 /* Don't lose promiscuous settings */
1441 em_set_promisc(adapter);
1442
1443 /* Set the interface as ACTIVE */
1444 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1445
1446 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1197 em_if_set_promisc(ctx, IFF_PROMISC);
1447 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1448
1449 /* MSI/X configuration for 82574 */
1450 if (adapter->hw.mac.type == e1000_82574) {
1198 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1199
1200 /* MSI/X configuration for 82574 */
1201 if (adapter->hw.mac.type == e1000_82574) {
1451 int tmp;
1452 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1202 int tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1203
1453 tmp |= E1000_CTRL_EXT_PBA_CLR;
1454 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1455 /* Set the IVAR - interrupt vector routing. */
1456 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars);
1204 tmp |= E1000_CTRL_EXT_PBA_CLR;
1205 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1206 /* Set the IVAR - interrupt vector routing. */
1207 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars);
1457 }
1208 } else if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
1209 igb_configure_queues(adapter);
1458
1210
1459#ifdef DEVICE_POLLING
1460 /*
1461 * Only enable interrupts if we are not polling, make sure
1462 * they are off otherwise.
1463 */
1464 if (if_getcapenable(ifp) & IFCAP_POLLING)
1465 em_disable_intr(adapter);
1466 else
1467#endif /* DEVICE_POLLING */
1468 em_enable_intr(adapter);
1211 /* this clears any pending interrupts */
1212 E1000_READ_REG(&adapter->hw, E1000_ICR);
1213 E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
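/*
 * NB: ICR is read-to-clear, so the read above flushes any stale
 * causes; writing E1000_ICS_LSC then raises a software link-status
 * interrupt so the admin path re-evaluates link state right after
 * init.
 */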
1469
1470 /* AMT based hardware can now take control from firmware */
1471 if (adapter->has_manage && adapter->has_amt)
1472 em_get_hw_control(adapter);
1214
1215 /* AMT based hardware can now take control from firmware */
1216 if (adapter->has_manage && adapter->has_amt)
1217 em_get_hw_control(adapter);
1473}
1474
1218
1475static void
1476em_init(void *arg)
1477{
1478 struct adapter *adapter = arg;
1479
1480 EM_CORE_LOCK(adapter);
1481 em_init_locked(adapter);
1482 EM_CORE_UNLOCK(adapter);
1483}
1484
1485
1486#ifdef DEVICE_POLLING
1487/*********************************************************************
1488 *
1489 * Legacy polling routine: note this only works with single queue
1490 *
1491 *********************************************************************/
1492static int
1493em_poll(if_t ifp, enum poll_cmd cmd, int count)
1494{
1495 struct adapter *adapter = if_getsoftc(ifp);
1496 struct tx_ring *txr = adapter->tx_rings;
1497 struct rx_ring *rxr = adapter->rx_rings;
1498 u32 reg_icr;
1499 int rx_done;
1500
1501 EM_CORE_LOCK(adapter);
1502 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1503 EM_CORE_UNLOCK(adapter);
1504 return (0);
1219 /* Set Energy Efficient Ethernet */
1220 if (adapter->hw.mac.type >= igb_mac_min &&
1221 adapter->hw.phy.media_type == e1000_media_type_copper) {
1222 if (adapter->hw.mac.type == e1000_i354)
1223 e1000_set_eee_i354(&adapter->hw, TRUE, TRUE);
1224 else
1225 e1000_set_eee_i350(&adapter->hw, TRUE, TRUE);
1505 }
1226 }
1506
1507 if (cmd == POLL_AND_CHECK_STATUS) {
1508 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1509 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1510 callout_stop(&adapter->timer);
1511 adapter->hw.mac.get_link_status = 1;
1512 em_update_link_status(adapter);
1513 callout_reset(&adapter->timer, hz,
1514 em_local_timer, adapter);
1515 }
1516 }
1517 EM_CORE_UNLOCK(adapter);
1518
1519 em_rxeof(rxr, count, &rx_done);
1520
1521 EM_TX_LOCK(txr);
1522 em_txeof(txr);
1523#ifdef EM_MULTIQUEUE
1524 if (!drbr_empty(ifp, txr->br))
1525 em_mq_start_locked(ifp, txr);
1526#else
1527 if (!if_sendq_empty(ifp))
1528 em_start_locked(ifp, txr);
1529#endif
1530 EM_TX_UNLOCK(txr);
1531
1532 return (rx_done);
1533}
1227}
1534#endif /* DEVICE_POLLING */
1535
1228
1536
1537/*********************************************************************
1538 *
1539 * Fast Legacy/MSI Combined Interrupt Service routine
1540 *
1541 *********************************************************************/
1229/*********************************************************************
1230 *
1231 * Fast Legacy/MSI Combined Interrupt Service routine
1232 *
1233 *********************************************************************/
1542static int
1543em_irq_fast(void *arg)
1234int
1235em_intr(void *arg)
1544{
1236{
1545 struct adapter *adapter = arg;
1546 if_t ifp;
1237 struct adapter *adapter = arg;
1238 if_ctx_t ctx = adapter->ctx;
1547 u32 reg_icr;
1548
1239 u32 reg_icr;
1240
1549 ifp = adapter->ifp;
1550
1551 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1552
1241 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1242
1243 if (adapter->intr_type != IFLIB_INTR_LEGACY)
1244 goto skip_stray;
1553 /* Hot eject? */
1554 if (reg_icr == 0xffffffff)
1555 return FILTER_STRAY;
1556
1557 /* Definitely not our interrupt. */
1558 if (reg_icr == 0x0)
1559 return FILTER_STRAY;
1560
1561 /*
1562 * Starting with the 82571 chip, bit 31 should be used to
1563 * determine whether the interrupt belongs to us.
1564 */
1565 if (adapter->hw.mac.type >= e1000_82571 &&
1566 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1567 return FILTER_STRAY;
1568
1245 /* Hot eject? */
1246 if (reg_icr == 0xffffffff)
1247 return FILTER_STRAY;
1248
1249 /* Definitely not our interrupt. */
1250 if (reg_icr == 0x0)
1251 return FILTER_STRAY;
1252
1253 /*
1254 * Starting with the 82571 chip, bit 31 should be used to
1255 * determine whether the interrupt belongs to us.
1256 */
1257 if (adapter->hw.mac.type >= e1000_82571 &&
1258 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1259 return FILTER_STRAY;
1260
1569 em_disable_intr(adapter);
1570 taskqueue_enqueue(adapter->tq, &adapter->que_task);
1571
1261skip_stray:
1572 /* Link status change */
1573 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1574 adapter->hw.mac.get_link_status = 1;
1262 /* Link status change */
1263 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1264 adapter->hw.mac.get_link_status = 1;
1575 taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1265 iflib_admin_intr_deferred(ctx);
1576 }
1577
1578 if (reg_icr & E1000_ICR_RXO)
1579 adapter->rx_overruns++;
1266 }
1267
1268 if (reg_icr & E1000_ICR_RXO)
1269 adapter->rx_overruns++;
1580 return FILTER_HANDLED;
1270
1271 return (FILTER_SCHEDULE_THREAD);
1581}
1582
1272}
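/*
 * NB: FILTER_SCHEDULE_THREAD asks iflib to run the deferred handler
 * for this interrupt, while FILTER_STRAY disowns it; the old
 * em_irq_fast() instead had to disable interrupts and enqueue its
 * own taskqueue entry before returning FILTER_HANDLED to get the
 * same effect.
 */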
1273
1583/* Combined RX/TX handler, used by Legacy and MSI */
1584static void
1274static void
1585em_handle_que(void *context, int pending)
1275igb_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
1586{
1276{
1587 struct adapter *adapter = context;
1588 if_t ifp = adapter->ifp;
1589 struct tx_ring *txr = adapter->tx_rings;
1590 struct rx_ring *rxr = adapter->rx_rings;
1591
1592 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1593 bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1594
1595 EM_TX_LOCK(txr);
1596 em_txeof(txr);
1597#ifdef EM_MULTIQUEUE
1598 if (!drbr_empty(ifp, txr->br))
1599 em_mq_start_locked(ifp, txr);
1600#else
1601 if (!if_sendq_empty(ifp))
1602 em_start_locked(ifp, txr);
1603#endif
1604 EM_TX_UNLOCK(txr);
1605 if (more) {
1606 taskqueue_enqueue(adapter->tq, &adapter->que_task);
1607 return;
1608 }
1609 }
1610
1611 em_enable_intr(adapter);
1612 return;
1277 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims);
1613}
1614
1278}
1279
1615
1616/*********************************************************************
1617 *
1618 * MSIX Interrupt Service Routines
1619 *
1620 **********************************************************************/
1621static void
1280static void
1622em_msix_tx(void *arg)
1281em_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq)
1623{
1282{
1624 struct tx_ring *txr = arg;
1625 struct adapter *adapter = txr->adapter;
1626 if_t ifp = adapter->ifp;
1283 E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims);
1284}
1627
1285
1628 ++txr->tx_irq;
1629 EM_TX_LOCK(txr);
1630 em_txeof(txr);
1631#ifdef EM_MULTIQUEUE
1632 if (!drbr_empty(ifp, txr->br))
1633 em_mq_start_locked(ifp, txr);
1634#else
1635 if (!if_sendq_empty(ifp))
1636 em_start_locked(ifp, txr);
1637#endif
1638
1639 /* Reenable this interrupt */
1640 E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
1641 EM_TX_UNLOCK(txr);
1642 return;
1286static int
1287em_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1288{
1289 struct adapter *adapter = iflib_get_softc(ctx);
1290 struct em_rx_queue *rxq = &adapter->rx_queues[rxqid];
1291
1292 if (adapter->hw.mac.type >= igb_mac_min)
1293 igb_enable_queue(adapter, rxq);
1294 else
1295 em_enable_queue(adapter, rxq);
1296 return (0);
1643}
1644
1645/*********************************************************************
1646 *
1647 * MSIX RX Interrupt Service routine
1648 *
1649 **********************************************************************/
1297}
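/*
 * NB: the two helpers above differ only in the re-arm register:
 * igb-class MACs unmask a queue through the extended mask register
 * (E1000_EIMS), while em-class MACs use the classic E1000_IMS.
 */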
1298
1299/*********************************************************************
1300 *
1301 * MSIX RX Interrupt Service routine
1302 *
1303 **********************************************************************/
1650
1651static void
1652em_msix_rx(void *arg)
1304static int
1305em_msix_que(void *arg)
1653{
1306{
1654 struct rx_ring *rxr = arg;
1655 struct adapter *adapter = rxr->adapter;
1656 bool more;
1657
1658 ++rxr->rx_irq;
1659 if (!(if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING))
1660 return;
1661 more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1662 if (more)
1663 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1664 else {
1665 /* Reenable this interrupt */
1666 E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
1667 }
1668 return;
1307 struct em_rx_queue *que = arg;
1308
1309 ++que->irqs;
1310
1311 return (FILTER_SCHEDULE_THREAD);
1669}
1670
1671/*********************************************************************
1672 *
1673 * MSIX Link Fast Interrupt Service routine
1674 *
1675 **********************************************************************/
1312}
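/*
 * NB: in the iflib model the per-queue MSI-X filter only counts the
 * interrupt and returns FILTER_SCHEDULE_THREAD; the rxeof/txeof
 * work that em_msix_rx() used to do inline now lives in the
 * isc_txrx ops run by iflib's queue tasks.
 */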
1313
1314/*********************************************************************
1315 *
1316 * MSIX Link Fast Interrupt Service routine
1317 *
1318 **********************************************************************/
1676static void
1319static int
1677em_msix_link(void *arg)
1678{
1679 struct adapter *adapter = arg;
1680 u32 reg_icr;
1681
1682 ++adapter->link_irq;
1320em_msix_link(void *arg)
1321{
1322 struct adapter *adapter = arg;
1323 u32 reg_icr;
1324
1325 ++adapter->link_irq;
1326 MPASS(adapter->hw.back != NULL);
1683 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1684
1685 if (reg_icr & E1000_ICR_RXO)
1686 adapter->rx_overruns++;
1687
1688 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1327 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1328
1329 if (reg_icr & E1000_ICR_RXO)
1330 adapter->rx_overruns++;
1331
1332 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1689 adapter->hw.mac.get_link_status = 1;
1690 em_handle_link(adapter, 0);
1691 } else
1333 em_handle_link(adapter->ctx);
1334 } else {
1692 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1335 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1693 EM_MSIX_LINK | E1000_IMS_LSC);
1336 EM_MSIX_LINK | E1000_IMS_LSC);
1337 if (adapter->hw.mac.type >= igb_mac_min)
1338 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
1339
1340 }
1341
1694 /*
 1695 ** Because we must read the ICR for this interrupt,
 1696 ** it may clear other causes using autoclear; for
1697 ** this reason we simply create a soft interrupt
1698 ** for all these vectors.
1699 */
1342 /*
 1343 ** Because we must read the ICR for this interrupt,
 1344 ** it may clear other causes using autoclear; for
1345 ** this reason we simply create a soft interrupt
1346 ** for all these vectors.
1347 */
1700 if (reg_icr) {
1348 if (reg_icr && adapter->hw.mac.type < igb_mac_min) {
1701 E1000_WRITE_REG(&adapter->hw,
1702 E1000_ICS, adapter->ims);
1703 }
1349 E1000_WRITE_REG(&adapter->hw,
1350 E1000_ICS, adapter->ims);
1351 }
1704 return;
1705}
1706
1352
1707static void
1708em_handle_rx(void *context, int pending)
1709{
1710 struct rx_ring *rxr = context;
1711 struct adapter *adapter = rxr->adapter;
1712 bool more;
1713
1714 more = em_rxeof(rxr, adapter->rx_process_limit, NULL);
1715 if (more)
1716 taskqueue_enqueue(rxr->tq, &rxr->rx_task);
1717 else {
1718 /* Reenable this interrupt */
1719 E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
1720 }
1353 return (FILTER_HANDLED);
1721}
1722
1723static void
1354}
1355
1356static void
1724em_handle_tx(void *context, int pending)
1357em_handle_link(void *context)
1725{
1358{
1726 struct tx_ring *txr = context;
1727 struct adapter *adapter = txr->adapter;
1728 if_t ifp = adapter->ifp;
1359 if_ctx_t ctx = context;
1360 struct adapter *adapter = iflib_get_softc(ctx);
1729
1361
1730 EM_TX_LOCK(txr);
1731 em_txeof(txr);
1732#ifdef EM_MULTIQUEUE
1733 if (!drbr_empty(ifp, txr->br))
1734 em_mq_start_locked(ifp, txr);
1735#else
1736 if (!if_sendq_empty(ifp))
1737 em_start_locked(ifp, txr);
1738#endif
1739 E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims);
1740 EM_TX_UNLOCK(txr);
1362 adapter->hw.mac.get_link_status = 1;
1363 iflib_admin_intr_deferred(ctx);
1741}
1742
1364}
1365
1743static void
1744em_handle_link(void *context, int pending)
1745{
1746 struct adapter *adapter = context;
1747 struct tx_ring *txr = adapter->tx_rings;
1748 if_t ifp = adapter->ifp;
1749
1366
1750 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
1751 return;
1752
1753 EM_CORE_LOCK(adapter);
1754 callout_stop(&adapter->timer);
1755 em_update_link_status(adapter);
1756 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1757 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1758 EM_MSIX_LINK | E1000_IMS_LSC);
1759 if (adapter->link_active) {
1760 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1761 EM_TX_LOCK(txr);
1762#ifdef EM_MULTIQUEUE
1763 if (!drbr_empty(ifp, txr->br))
1764 em_mq_start_locked(ifp, txr);
1765#else
 1766 if (!if_sendq_empty(ifp))
1767 em_start_locked(ifp, txr);
1768#endif
1769 EM_TX_UNLOCK(txr);
1770 }
1771 }
1772 EM_CORE_UNLOCK(adapter);
1773}
1774
1775
1776/*********************************************************************
1777 *
1778 * Media Ioctl callback
1779 *
1780 * This routine is called whenever the user queries the status of
1781 * the interface using ifconfig.
1782 *
1783 **********************************************************************/
1784static void
1367/*********************************************************************
1368 *
1369 * Media Ioctl callback
1370 *
1371 * This routine is called whenever the user queries the status of
1372 * the interface using ifconfig.
1373 *
1374 **********************************************************************/
1375static void
1785em_media_status(if_t ifp, struct ifmediareq *ifmr)
1376em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1786{
1377{
1787 struct adapter *adapter = if_getsoftc(ifp);
1788 u_char fiber_type = IFM_1000_SX;
1378 struct adapter *adapter = iflib_get_softc(ctx);
1379 u_char fiber_type = IFM_1000_SX;
1380
1381 INIT_DEBUGOUT("em_if_media_status: begin");
1789
1382
1790 INIT_DEBUGOUT("em_media_status: begin");
1383 iflib_admin_intr_deferred(ctx);
1791
1384
1792 EM_CORE_LOCK(adapter);
1793 em_update_link_status(adapter);
1794
1795 ifmr->ifm_status = IFM_AVALID;
1796 ifmr->ifm_active = IFM_ETHER;
1797
1798 if (!adapter->link_active) {
1385 ifmr->ifm_status = IFM_AVALID;
1386 ifmr->ifm_active = IFM_ETHER;
1387
1388 if (!adapter->link_active) {
1799 EM_CORE_UNLOCK(adapter);
1800 return;
1801 }
1802
1803 ifmr->ifm_status |= IFM_ACTIVE;
1804
1805 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1806 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1389 return;
1390 }
1391
1392 ifmr->ifm_status |= IFM_ACTIVE;
1393
1394 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1395 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1396 if (adapter->hw.mac.type == e1000_82545)
1397 fiber_type = IFM_1000_LX;
1807 ifmr->ifm_active |= fiber_type | IFM_FDX;
1808 } else {
1809 switch (adapter->link_speed) {
1810 case 10:
1811 ifmr->ifm_active |= IFM_10_T;
1812 break;
1813 case 100:
1814 ifmr->ifm_active |= IFM_100_TX;
1815 break;
1816 case 1000:
1817 ifmr->ifm_active |= IFM_1000_T;
1818 break;
1819 }
1820 if (adapter->link_duplex == FULL_DUPLEX)
1821 ifmr->ifm_active |= IFM_FDX;
1822 else
1823 ifmr->ifm_active |= IFM_HDX;
1824 }
1398 ifmr->ifm_active |= fiber_type | IFM_FDX;
1399 } else {
1400 switch (adapter->link_speed) {
1401 case 10:
1402 ifmr->ifm_active |= IFM_10_T;
1403 break;
1404 case 100:
1405 ifmr->ifm_active |= IFM_100_TX;
1406 break;
1407 case 1000:
1408 ifmr->ifm_active |= IFM_1000_T;
1409 break;
1410 }
1411 if (adapter->link_duplex == FULL_DUPLEX)
1412 ifmr->ifm_active |= IFM_FDX;
1413 else
1414 ifmr->ifm_active |= IFM_HDX;
1415 }
1825 EM_CORE_UNLOCK(adapter);
1826}
1827
1828/*********************************************************************
1829 *
1830 * Media Ioctl callback
1831 *
1832 * This routine is called when the user changes speed/duplex using
 1833 * media/mediaopt option with ifconfig.
1834 *
1835 **********************************************************************/
1836static int
1416}
1417
1418/*********************************************************************
1419 *
1420 * Media Ioctl callback
1421 *
1422 * This routine is called when the user changes speed/duplex using
 1423 * media/mediaopt option with ifconfig.
1424 *
1425 **********************************************************************/
1426static int
1837em_media_change(if_t ifp)
1427em_if_media_change(if_ctx_t ctx)
1838{
1428{
1839 struct adapter *adapter = if_getsoftc(ifp);
1840 struct ifmedia *ifm = &adapter->media;
1429 struct adapter *adapter = iflib_get_softc(ctx);
1430 struct ifmedia *ifm = iflib_get_media(ctx);
1841
1431
1842 INIT_DEBUGOUT("em_media_change: begin");
1432 INIT_DEBUGOUT("em_if_media_change: begin");
1843
1844 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1845 return (EINVAL);
1846
1433
1434 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1435 return (EINVAL);
1436
1847 EM_CORE_LOCK(adapter);
1848 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1849 case IFM_AUTO:
1850 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1851 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1852 break;
1853 case IFM_1000_LX:
1854 case IFM_1000_SX:
1855 case IFM_1000_T:

--- 15 unchanged lines hidden ---

1871 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1872 else
1873 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1874 break;
1875 default:
1876 device_printf(adapter->dev, "Unsupported media type\n");
1877 }
1878
1437 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1438 case IFM_AUTO:
1439 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1440 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1441 break;
1442 case IFM_1000_LX:
1443 case IFM_1000_SX:
1444 case IFM_1000_T:

--- 15 unchanged lines hidden ---

1460 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1461 else
1462 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1463 break;
1464 default:
1465 device_printf(adapter->dev, "Unsupported media type\n");
1466 }
1467
1879 em_init_locked(adapter);
1880 EM_CORE_UNLOCK(adapter);
1468 em_if_init(ctx);
1881
1882 return (0);
1883}
1884
1469
1470 return (0);
1471}
1472
1885/*********************************************************************
1886 *
1887 * This routine maps the mbufs to tx descriptors.
1888 *
1889 * return 0 on success, positive on failure
1890 **********************************************************************/
1891
1892static int
1473static int
1893em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1474em_if_set_promisc(if_ctx_t ctx, int flags)
1894{
1475{
1895 struct adapter *adapter = txr->adapter;
1896 bus_dma_segment_t segs[EM_MAX_SCATTER];
1897 bus_dmamap_t map;
1898 struct em_txbuffer *tx_buffer, *tx_buffer_mapped;
1899 struct e1000_tx_desc *ctxd = NULL;
1900 struct mbuf *m_head;
1901 struct ether_header *eh;
1902 struct ip *ip = NULL;
1903 struct tcphdr *tp = NULL;
1904 u32 txd_upper = 0, txd_lower = 0;
1905 int ip_off, poff;
1906 int nsegs, i, j, first, last = 0;
1907 int error;
1908 bool do_tso, tso_desc, remap = TRUE;
1909
1910 m_head = *m_headp;
1911 do_tso = (m_head->m_pkthdr.csum_flags & CSUM_TSO);
1912 tso_desc = FALSE;
1913 ip_off = poff = 0;
1914
1915 /*
1916 * Intel recommends entire IP/TCP header length reside in a single
1917 * buffer. If multiple descriptors are used to describe the IP and
1918 * TCP header, each descriptor should describe one or more
1919 * complete headers; descriptors referencing only parts of headers
1920 * are not supported. If all layer headers are not coalesced into
1921 * a single buffer, each buffer should not cross a 4KB boundary,
1922 * or be larger than the maximum read request size.
 1923 * The controller also requires modifying the IP/TCP header to make
 1924 * TSO work, so we first get a writable mbuf chain, then coalesce the
 1925 * ethernet/IP/TCP header into a single buffer to meet the controller's
 1926 * requirement. This also simplifies IP/TCP/UDP checksum offloading,
1927 * which also has similar restrictions.
1928 */
1929 if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
1930 if (do_tso || (m_head->m_next != NULL &&
1931 m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) {
1932 if (M_WRITABLE(*m_headp) == 0) {
1933 m_head = m_dup(*m_headp, M_NOWAIT);
1934 m_freem(*m_headp);
1935 if (m_head == NULL) {
1936 *m_headp = NULL;
1937 return (ENOBUFS);
1938 }
1939 *m_headp = m_head;
1940 }
1941 }
1942 /*
1943 * XXX
1944 * Assume IPv4, we don't have TSO/checksum offload support
1945 * for IPv6 yet.
1946 */
1947 ip_off = sizeof(struct ether_header);
1948 if (m_head->m_len < ip_off) {
1949 m_head = m_pullup(m_head, ip_off);
1950 if (m_head == NULL) {
1951 *m_headp = NULL;
1952 return (ENOBUFS);
1953 }
1954 }
1955 eh = mtod(m_head, struct ether_header *);
1956 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1957 ip_off = sizeof(struct ether_vlan_header);
1958 if (m_head->m_len < ip_off) {
1959 m_head = m_pullup(m_head, ip_off);
1960 if (m_head == NULL) {
1961 *m_headp = NULL;
1962 return (ENOBUFS);
1963 }
1964 }
1965 }
1966 if (m_head->m_len < ip_off + sizeof(struct ip)) {
1967 m_head = m_pullup(m_head, ip_off + sizeof(struct ip));
1968 if (m_head == NULL) {
1969 *m_headp = NULL;
1970 return (ENOBUFS);
1971 }
1972 }
1973 ip = (struct ip *)(mtod(m_head, char *) + ip_off);
1974 poff = ip_off + (ip->ip_hl << 2);
1975
1976 if (do_tso || (m_head->m_pkthdr.csum_flags & CSUM_TCP)) {
1977 if (m_head->m_len < poff + sizeof(struct tcphdr)) {
1978 m_head = m_pullup(m_head, poff +
1979 sizeof(struct tcphdr));
1980 if (m_head == NULL) {
1981 *m_headp = NULL;
1982 return (ENOBUFS);
1983 }
1984 }
1985 tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
1986 /*
1987 * TSO workaround:
1988 * pull 4 more bytes of data into it.
1989 */
1990 if (m_head->m_len < poff + (tp->th_off << 2)) {
1991 m_head = m_pullup(m_head, poff +
1992 (tp->th_off << 2) +
1993 TSO_WORKAROUND);
1994 if (m_head == NULL) {
1995 *m_headp = NULL;
1996 return (ENOBUFS);
1997 }
1998 }
1999 ip = (struct ip *)(mtod(m_head, char *) + ip_off);
2000 tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
2001 if (do_tso) {
2002 ip->ip_len = htons(m_head->m_pkthdr.tso_segsz +
2003 (ip->ip_hl << 2) +
2004 (tp->th_off << 2));
2005 ip->ip_sum = 0;
2006 /*
2007 * The pseudo TCP checksum does not include TCP
 2008 * payload length, so the driver should recompute
 2009 * the checksum here to match what the hardware
 2010 * expects to see. This adheres to Microsoft's
 2011 * Large Send specification.
2012 */
2013 tp->th_sum = in_pseudo(ip->ip_src.s_addr,
2014 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2015 }
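/*
 * NB: in_pseudo() here folds only the source address, destination
 * address and protocol word, deliberately leaving out the length
 * that a normal pseudo-header checksum includes, since the hardware
 * inserts the per-segment length itself during TSO.
 */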
2016 } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
2017 if (m_head->m_len < poff + sizeof(struct udphdr)) {
2018 m_head = m_pullup(m_head, poff +
2019 sizeof(struct udphdr));
2020 if (m_head == NULL) {
2021 *m_headp = NULL;
2022 return (ENOBUFS);
2023 }
2024 }
2025 ip = (struct ip *)(mtod(m_head, char *) + ip_off);
2026 }
2027 *m_headp = m_head;
2028 }
2029
2030 /*
2031 * Map the packet for DMA
2032 *
2033 * Capture the first descriptor index,
2034 * this descriptor will have the index
2035 * of the EOP which is the only one that
2036 * now gets a DONE bit writeback.
2037 */
2038 first = txr->next_avail_desc;
2039 tx_buffer = &txr->tx_buffers[first];
2040 tx_buffer_mapped = tx_buffer;
2041 map = tx_buffer->map;
2042
2043retry:
2044 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
2045 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2046
2047 /*
2048 * There are two types of errors we can (try) to handle:
2049 * - EFBIG means the mbuf chain was too long and bus_dma ran
2050 * out of segments. Defragment the mbuf chain and try again.
2051 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2052 * at this point in time. Defer sending and try again later.
2053 * All other errors, in particular EINVAL, are fatal and prevent the
2054 * mbuf chain from ever going through. Drop it and report error.
2055 */
2056 if (error == EFBIG && remap) {
2057 struct mbuf *m;
2058
2059 m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
2060 if (m == NULL) {
2061 adapter->mbuf_defrag_failed++;
2062 m_freem(*m_headp);
2063 *m_headp = NULL;
2064 return (ENOBUFS);
2065 }
2066 *m_headp = m;
2067
2068 /* Try it again, but only once */
2069 remap = FALSE;
2070 goto retry;
2071 } else if (error != 0) {
2072 adapter->no_tx_dma_setup++;
2073 m_freem(*m_headp);
2074 *m_headp = NULL;
2075 return (error);
2076 }
2077
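/*
 * Illustrative note (not driver code): m_collapse() used above only
 * tries to coalesce adjacent mbufs within the existing chain, so it
 * can still fail on badly fragmented chains.  A heavier fallback some
 * FreeBSD drivers use is a full copy via m_defrag(), e.g.:
 *
 *	m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER);
 *	if (m == NULL)
 *		m = m_defrag(*m_headp, M_NOWAIT);
 *	if (m == NULL) {
 *		m_freem(*m_headp);
 *		*m_headp = NULL;
 *		return (ENOBUFS);
 *	}
 *	*m_headp = m;
 */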
2078 /*
2079 * TSO Hardware workaround, if this packet is not
2080 * TSO, and is only a single descriptor long, and
2081 * it follows a TSO burst, then we need to add a
2082 * sentinel descriptor to prevent premature writeback.
2083 */
2084 if ((!do_tso) && (txr->tx_tso == TRUE)) {
2085 if (nsegs == 1)
2086 tso_desc = TRUE;
2087 txr->tx_tso = FALSE;
2088 }
2089
2090 if (txr->tx_avail < (nsegs + EM_MAX_SCATTER)) {
2091 txr->no_desc_avail++;
2092 bus_dmamap_unload(txr->txtag, map);
2093 return (ENOBUFS);
2094 }
2095 m_head = *m_headp;
2096
2097 /* Do hardware assists */
2098 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2099 em_tso_setup(txr, m_head, ip_off, ip, tp,
2100 &txd_upper, &txd_lower);
2101 /* we need to make a final sentinel transmit desc */
2102 tso_desc = TRUE;
2103 } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2104 em_transmit_checksum_setup(txr, m_head,
2105 ip_off, ip, &txd_upper, &txd_lower);
2106
2107 if (m_head->m_flags & M_VLANTAG) {
2108 /* Set the vlan id. */
2109 txd_upper |= htole16(if_getvtag(m_head)) << 16;
2110 /* Tell hardware to add tag */
2111 txd_lower |= htole32(E1000_TXD_CMD_VLE);
2112 }
2113
2114 i = txr->next_avail_desc;
2115
2116 /* Set up our transmit descriptors */
2117 for (j = 0; j < nsegs; j++) {
2118 bus_size_t seg_len;
2119 bus_addr_t seg_addr;
2120
2121 tx_buffer = &txr->tx_buffers[i];
2122 ctxd = &txr->tx_base[i];
2123 seg_addr = segs[j].ds_addr;
2124 seg_len = segs[j].ds_len;
2125 /*
2126 ** TSO Workaround:
2127 ** If this is the last descriptor, we want to
2128 ** split it so we have a small final sentinel
2129 */
2130 if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
2131 seg_len -= TSO_WORKAROUND;
2132 ctxd->buffer_addr = htole64(seg_addr);
2133 ctxd->lower.data = htole32(
2134 adapter->txd_cmd | txd_lower | seg_len);
2135 ctxd->upper.data = htole32(txd_upper);
2136 if (++i == adapter->num_tx_desc)
2137 i = 0;
2138
2139 /* Now make the sentinel */
2140 txr->tx_avail--;
2141 ctxd = &txr->tx_base[i];
2142 tx_buffer = &txr->tx_buffers[i];
2143 ctxd->buffer_addr =
2144 htole64(seg_addr + seg_len);
2145 ctxd->lower.data = htole32(
2146 adapter->txd_cmd | txd_lower | TSO_WORKAROUND);
2147 ctxd->upper.data =
2148 htole32(txd_upper);
2149 last = i;
2150 if (++i == adapter->num_tx_desc)
2151 i = 0;
2152 } else {
2153 ctxd->buffer_addr = htole64(seg_addr);
2154 ctxd->lower.data = htole32(
2155 adapter->txd_cmd | txd_lower | seg_len);
2156 ctxd->upper.data = htole32(txd_upper);
2157 last = i;
2158 if (++i == adapter->num_tx_desc)
2159 i = 0;
2160 }
2161 tx_buffer->m_head = NULL;
2162 tx_buffer->next_eop = -1;
2163 }
2164
2165 txr->next_avail_desc = i;
2166 txr->tx_avail -= nsegs;
2167
2168 tx_buffer->m_head = m_head;
2169 /*
2170 ** Here we swap the map so the last descriptor,
2171 ** which gets the completion interrupt has the
2172 ** real map, and the first descriptor gets the
2173 ** unused map from this descriptor.
2174 */
2175 tx_buffer_mapped->map = tx_buffer->map;
2176 tx_buffer->map = map;
2177 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
2178
2179 /*
2180 * Last Descriptor of Packet
2181 * needs End Of Packet (EOP)
2182 * and Report Status (RS)
2183 */
2184 ctxd->lower.data |=
2185 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2186 /*
2187 * Keep track in the first buffer which
2188 * descriptor will be written back
2189 */
2190 tx_buffer = &txr->tx_buffers[first];
2191 tx_buffer->next_eop = last;
2192
2193 /*
2194 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2195 * that this frame is available to transmit.
2196 */
2197 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2198 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2199 E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
2200
2201 return (0);
2202}
2203
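/*
 * Worked example (illustrative) of the sentinel split performed in the
 * descriptor loop above: with a final segment at address A and
 * seg_len = 64, the loop emits
 *
 *	desc n  : buffer_addr = A,      length = 60
 *	desc n+1: buffer_addr = A + 60, length = TSO_WORKAROUND (4)
 *
 * so the small sentinel descriptor is the one that takes the DONE
 * writeback, avoiding the premature-writeback erratum after a TSO
 * burst.
 */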
2204static void
2205em_set_promisc(struct adapter *adapter)
2206{
2207	if_t ifp = adapter->ifp;
2208	u32 reg_rctl;
2209
2210	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2211
2212	if (if_getflags(ifp) & IFF_PROMISC) {
2213		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2214		/* Turn this on if you want to see bad packets */
2215		if (em_debug_sbp)
2216			reg_rctl |= E1000_RCTL_SBP;
2217		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2218	} else if (if_getflags(ifp) & IFF_ALLMULTI) {
2219		reg_rctl |= E1000_RCTL_MPE;
2220		reg_rctl &= ~E1000_RCTL_UPE;
2221		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2222	}
2223}
2224
2225static void
2226em_disable_promisc(struct adapter *adapter)
2227{
2228	if_t ifp = adapter->ifp;
2229	u32 reg_rctl;
2230	int mcnt = 0;
1476	struct adapter *adapter = iflib_get_softc(ctx);
1477	u32 reg_rctl;
1478
1479	em_disable_promisc(ctx);
1480
1481	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1482
1483	if (flags & IFF_PROMISC) {
1484		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1485		/* Turn this on if you want to see bad packets */
1486		if (em_debug_sbp)
1487			reg_rctl |= E1000_RCTL_SBP;
1488		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1489	} else if (flags & IFF_ALLMULTI) {
1490		reg_rctl |= E1000_RCTL_MPE;
1491		reg_rctl &= ~E1000_RCTL_UPE;
1492		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1493	}
1494	return (0);
1495}
1496
1497static void
1498em_disable_promisc(if_ctx_t ctx)
1499{
1500	struct adapter *adapter = iflib_get_softc(ctx);
1501	struct ifnet *ifp = iflib_get_ifp(ctx);
2231
2232 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2233 reg_rctl &= (~E1000_RCTL_UPE);
2234 if (if_getflags(ifp) & IFF_ALLMULTI)
2235 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2236 else

--- 9 unchanged lines hidden ---

2246/*********************************************************************
2247 * Multicast Update
2248 *
2249 * This routine is called whenever multicast address list is updated.
2250 *
2251 **********************************************************************/
2252
2253static void
1502 u32 reg_rctl;
1503 int mcnt = 0;
1504
1505 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1506 reg_rctl &= (~E1000_RCTL_UPE);
1507 if (if_getflags(ifp) & IFF_ALLMULTI)
1508 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1509 else

--- 9 unchanged lines hidden ---

1519/*********************************************************************
1520 * Multicast Update
1521 *
1522 * This routine is called whenever multicast address list is updated.
1523 *
1524 **********************************************************************/
1525
1526static void
2254em_set_multi(struct adapter *adapter)
2255{
2256	if_t ifp = adapter->ifp;
1527em_if_multi_set(if_ctx_t ctx)
1528{
1529	struct adapter *adapter = iflib_get_softc(ctx);
1530	struct ifnet *ifp = iflib_get_ifp(ctx);
2257 u32 reg_rctl = 0;
2258 u8 *mta; /* Multicast array memory */
2259 int mcnt = 0;
2260
2261 IOCTL_DEBUGOUT("em_set_multi: begin");
2262
2263 mta = adapter->mta;
2264 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

--- 32 unchanged lines hidden ---

2297/*********************************************************************
2298 * Timer routine
2299 *
2300 * This routine checks for link status and updates statistics.
2301 *
2302 **********************************************************************/
2303
2304static void
1531 u32 reg_rctl = 0;
1532 u8 *mta; /* Multicast array memory */
1533 int mcnt = 0;
1534
1535 IOCTL_DEBUGOUT("em_set_multi: begin");
1536
1537 mta = adapter->mta;
1538 bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

--- 32 unchanged lines hidden ---

1571/*********************************************************************
1572 * Timer routine
1573 *
1574 * This routine checks for link status and updates statistics.
1575 *
1576 **********************************************************************/
1577
1578static void
2305em_local_timer(void *arg)
2306{
2307	struct adapter *adapter = arg;
2308	if_t ifp = adapter->ifp;
2309	struct tx_ring *txr = adapter->tx_rings;
2310	struct rx_ring *rxr = adapter->rx_rings;
2311	u32 trigger = 0;
2312
1579em_if_timer(if_ctx_t ctx, uint16_t qid)
1580{
1581	struct adapter *adapter = iflib_get_softc(ctx);
1582	struct em_rx_queue *que;
1583	int i;
1584	int trigger = 0;
1585
2313 EM_CORE_LOCK_ASSERT(adapter);
2314
2315 em_update_link_status(adapter);
1586 em_if_update_admin_status(ctx);
2316 em_update_stats_counters(adapter);
2317
2318 /* Reset LAA into RAR[0] on 82571 */
2319 if ((adapter->hw.mac.type == e1000_82571) &&
2320 e1000_get_laa_state_82571(&adapter->hw))
2321 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2322
1587 em_update_stats_counters(adapter);
1588
1589 /* Reset LAA into RAR[0] on 82571 */
1590 if ((adapter->hw.mac.type == e1000_82571) &&
1591 e1000_get_laa_state_82571(&adapter->hw))
1592 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1593
1594 if (adapter->hw.mac.type < em_mac_min)
1595 lem_smartspeed(adapter);
1596
2323	/* Mask to use in the irq trigger */
2324	if (adapter->msix_mem) {
2325		for (int i = 0; i < adapter->num_queues; i++, rxr++)
2326			trigger |= rxr->ims;
2327		rxr = adapter->rx_rings;
2328	} else
2329		trigger = E1000_ICS_RXDMT0;
2330
1597	/* Mask to use in the irq trigger */
1598	if (adapter->intr_type == IFLIB_INTR_MSIX) {
1599		for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++)
1600			trigger |= que->eims;
1601	} else {
1602		trigger = E1000_ICS_RXDMT0;
2331 /*
2332	** Check on the state of the TX queue(s); this
2333	** can be done without the lock because it is read-only
2334	** and the HUNG state will be static once set.
2335 */
2336 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2337 if (txr->busy == EM_TX_HUNG)
2338 goto hung;
2339 if (txr->busy >= EM_TX_MAXTRIES)
2340 txr->busy = EM_TX_HUNG;
2341 /* Schedule a TX tasklet if needed */
2342 if (txr->tx_avail <= EM_MAX_SCATTER)
2343 taskqueue_enqueue(txr->tq, &txr->tx_task);
2344 }
1603 }
2345
2346 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
2347#ifndef DEVICE_POLLING
2348 /* Trigger an RX interrupt to guarantee mbuf refresh */
2349 E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger);
2350#endif
2351 return;
2352hung:
2353 /* Looks like we're hung */
2354 device_printf(adapter->dev, "Watchdog timeout Queue[%d]-- resetting\n",
2355 txr->me);
2356 em_print_debug_info(adapter);
2357 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2358 adapter->watchdog_events++;
2359 em_init_locked(adapter);
2360}
2361
2362
2363static void
2364em_update_link_status(struct adapter *adapter)
2365{
2366	struct e1000_hw *hw = &adapter->hw;
2367	if_t ifp = adapter->ifp;
2368	device_t dev = adapter->dev;
2369	struct tx_ring *txr = adapter->tx_rings;
2370	u32 link_check = 0;
2371
1604}
1605
1606
1607static void
1608em_if_update_admin_status(if_ctx_t ctx)
1609{
1610	struct adapter *adapter = iflib_get_softc(ctx);
1611	struct e1000_hw *hw = &adapter->hw;
1612	struct ifnet *ifp = iflib_get_ifp(ctx);
1613	device_t dev = iflib_get_dev(ctx);
2372 /* Get the cached link value or read phy for real */
2373 switch (hw->phy.media_type) {
2374 case e1000_media_type_copper:
2375 if (hw->mac.get_link_status) {
2376 if (hw->mac.type == e1000_pch_spt)
2377 msec_delay(50);
2378 /* Do the work to read phy */
2379 e1000_check_for_link(hw);
2380 link_check = !hw->mac.get_link_status;
2381 if (link_check) /* ESB2 fix */
2382 e1000_cfg_on_link_up(hw);
1614 u32 link_check = 0;
1615
1616 /* Get the cached link value or read phy for real */
1617 switch (hw->phy.media_type) {
1618 case e1000_media_type_copper:
1619 if (hw->mac.get_link_status) {
1620 if (hw->mac.type == e1000_pch_spt)
1621 msec_delay(50);
1622 /* Do the work to read phy */
1623 e1000_check_for_link(hw);
1624 link_check = !hw->mac.get_link_status;
1625 if (link_check) /* ESB2 fix */
1626 e1000_cfg_on_link_up(hw);
2383 } else
1627 } else {
2384 link_check = TRUE;
1628 link_check = TRUE;
1629 }
2385 break;
2386 case e1000_media_type_fiber:
2387 e1000_check_for_link(hw);
2388 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2389 E1000_STATUS_LU);
2390 break;
2391 case e1000_media_type_internal_serdes:
2392 e1000_check_for_link(hw);
2393 link_check = adapter->hw.mac.serdes_has_link;
2394 break;
2395 default:
2396 case e1000_media_type_unknown:
2397 break;
2398 }
2399
2400 /* Now check for a transition */
2401 if (link_check && (adapter->link_active == 0)) {
2402 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2403 &adapter->link_duplex);
1630 break;
1631 case e1000_media_type_fiber:
1632 e1000_check_for_link(hw);
1633 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1634 E1000_STATUS_LU);
1635 break;
1636 case e1000_media_type_internal_serdes:
1637 e1000_check_for_link(hw);
1638 link_check = adapter->hw.mac.serdes_has_link;
1639 break;
1640 default:
1641 case e1000_media_type_unknown:
1642 break;
1643 }
1644
1645 /* Now check for a transition */
1646 if (link_check && (adapter->link_active == 0)) {
1647 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
1648 &adapter->link_duplex);
2404 /*
2405		** TSO has proven to be problematic when not running
2406		** at full gigabit speed, so disable the assist
2407		** automatically at lower speeds. -jfv
2408 */
2409 if (adapter->link_speed != SPEED_1000) {
2410 if_sethwassistbits(ifp, 0, CSUM_TSO);
2411 if_setcapenablebit(ifp, 0, IFCAP_TSO4);
2412 if_setcapabilitiesbit(ifp, 0, IFCAP_TSO4);
2413
2414 }
2415
2416 /* Check if we must disable SPEED_MODE bit on PCI-E */
2417 if ((adapter->link_speed != SPEED_1000) &&
2418 ((hw->mac.type == e1000_82571) ||
2419 (hw->mac.type == e1000_82572))) {
2420 int tarc0;
2421 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2422 tarc0 &= ~TARC_SPEED_MODE_BIT;
2423 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2424 }
2425 if (bootverbose)
2426 device_printf(dev, "Link is up %d Mbps %s\n",
2427 adapter->link_speed,
2428 ((adapter->link_duplex == FULL_DUPLEX) ?
2429 "Full Duplex" : "Half Duplex"));
2430 adapter->link_active = 1;
2431 adapter->smartspeed = 0;
2432 if_setbaudrate(ifp, adapter->link_speed * 1000000);
1649 /* Check if we must disable SPEED_MODE bit on PCI-E */
1650 if ((adapter->link_speed != SPEED_1000) &&
1651 ((hw->mac.type == e1000_82571) ||
1652 (hw->mac.type == e1000_82572))) {
1653 int tarc0;
1654 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
1655 tarc0 &= ~TARC_SPEED_MODE_BIT;
1656 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
1657 }
1658 if (bootverbose)
1659 device_printf(dev, "Link is up %d Mbps %s\n",
1660 adapter->link_speed,
1661 ((adapter->link_duplex == FULL_DUPLEX) ?
1662 "Full Duplex" : "Half Duplex"));
1663 adapter->link_active = 1;
1664 adapter->smartspeed = 0;
1665 if_setbaudrate(ifp, adapter->link_speed * 1000000);
2433 if_link_state_change(ifp, LINK_STATE_UP);
1666 iflib_link_state_change(ctx, LINK_STATE_UP, ifp->if_baudrate);
1667 printf("Link state changed to up\n");
2434 } else if (!link_check && (adapter->link_active == 1)) {
2435 if_setbaudrate(ifp, 0);
2436 adapter->link_speed = 0;
2437 adapter->link_duplex = 0;
2438 if (bootverbose)
2439 device_printf(dev, "Link is Down\n");
2440 adapter->link_active = 0;
1668 } else if (!link_check && (adapter->link_active == 1)) {
1669 if_setbaudrate(ifp, 0);
1670 adapter->link_speed = 0;
1671 adapter->link_duplex = 0;
1672 if (bootverbose)
1673 device_printf(dev, "Link is Down\n");
1674 adapter->link_active = 0;
2441 /* Link down, disable hang detection */
2442 for (int i = 0; i < adapter->num_queues; i++, txr++)
2443 txr->busy = EM_TX_IDLE;
2444 if_link_state_change(ifp, LINK_STATE_DOWN);
1675 iflib_link_state_change(ctx, LINK_STATE_DOWN, ifp->if_baudrate);
1676 printf("link state changed to down\n");
2445 }
1677 }
1678
1679 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC);
2446}
2447
2448/*********************************************************************
2449 *
2450 * This routine disables all traffic on the adapter by issuing a
2451 * global reset on the MAC and deallocates TX/RX buffers.
2452 *
2453 * This routine should always be called with BOTH the CORE
2454 * and TX locks.
2455 **********************************************************************/
2456
2457static void
1680}
1681
1682/*********************************************************************
1683 *
1684 * This routine disables all traffic on the adapter by issuing a
1685 * global reset on the MAC and deallocates TX/RX buffers.
1686 *
1687 * This routine should always be called with BOTH the CORE
1688 * and TX locks.
1689 **********************************************************************/
1690
1691static void
2458em_stop(void *arg)
2459{
2460	struct adapter *adapter = arg;
2461	if_t ifp = adapter->ifp;
2462	struct tx_ring *txr = adapter->tx_rings;
2463
2464	EM_CORE_LOCK_ASSERT(adapter);
2465
2466	INIT_DEBUGOUT("em_stop: begin");
1692em_if_stop(if_ctx_t ctx)
1693{
1694	struct adapter *adapter = iflib_get_softc(ctx);
1695
1696	INIT_DEBUGOUT("em_stop: begin");
2467
2468 em_disable_intr(adapter);
2469 callout_stop(&adapter->timer);
2470
2471 /* Tell the stack that the interface is no longer active */
2472 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2473
2474 /* Disarm Hang Detection. */
2475 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2476 EM_TX_LOCK(txr);
2477 txr->busy = EM_TX_IDLE;
2478 EM_TX_UNLOCK(txr);
2479 }
2480
2481 /* I219 needs some special flushing to avoid hangs */
2482 if (adapter->hw.mac.type == e1000_pch_spt)
2483 em_flush_desc_rings(adapter);
2484
2485	e1000_reset_hw(&adapter->hw);
2486	E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2487
1697
1698	e1000_reset_hw(&adapter->hw);
1699	if (adapter->hw.mac.type >= e1000_82544)
1700		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2488 e1000_led_off(&adapter->hw);
2489 e1000_cleanup_led(&adapter->hw);
2490}
2491
2492
2493/*********************************************************************
2494 *
2495 * Determine hardware revision.
2496 *
2497 **********************************************************************/
2498static void
1701
1702 e1000_led_off(&adapter->hw);
1703 e1000_cleanup_led(&adapter->hw);
1704}
1705
1706
1707/*********************************************************************
1708 *
1709 * Determine hardware revision.
1710 *
1711 **********************************************************************/
1712static void
2499em_identify_hardware(struct adapter *adapter)
2500{
2501	device_t dev = adapter->dev;
2502
2503	/* Make sure our PCI config space has the necessary stuff set */
1713em_identify_hardware(if_ctx_t ctx)
1714{
1715	device_t dev = iflib_get_dev(ctx);
1716	struct adapter *adapter = iflib_get_softc(ctx);
1717
1718	/* Make sure our PCI config space has the necessary stuff set */
2504 pci_enable_busmaster(dev);
2505 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2506
2507 /* Save off the information about this board */
2508 adapter->hw.vendor_id = pci_get_vendor(dev);
2509 adapter->hw.device_id = pci_get_device(dev);
2510 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2511 adapter->hw.subsystem_vendor_id =
2512 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2513 adapter->hw.subsystem_device_id =
2514 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2515
2516 /* Do Shared Code Init and Setup */
2517 if (e1000_set_mac_type(&adapter->hw)) {
2518 device_printf(dev, "Setup init failure\n");
2519 return;
2520 }
2521}
2522
2523static int
1719 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1720
1721 /* Save off the information about this board */
1722 adapter->hw.vendor_id = pci_get_vendor(dev);
1723 adapter->hw.device_id = pci_get_device(dev);
1724 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1725 adapter->hw.subsystem_vendor_id =
1726 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1727 adapter->hw.subsystem_device_id =
1728 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1729
1730 /* Do Shared Code Init and Setup */
1731 if (e1000_set_mac_type(&adapter->hw)) {
1732 device_printf(dev, "Setup init failure\n");
1733 return;
1734 }
1735}
1736
1737static int
2524em_allocate_pci_resources(struct adapter *adapter)
2525{
2526	device_t dev = adapter->dev;
2527	int rid;
2528
1738em_allocate_pci_resources(if_ctx_t ctx)
1739{
1740	struct adapter *adapter = iflib_get_softc(ctx);
1741	device_t dev = iflib_get_dev(ctx);
1742	int rid, val;
2529 rid = PCIR_BAR(0);
2530 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2531 &rid, RF_ACTIVE);
2532 if (adapter->memory == NULL) {
2533 device_printf(dev, "Unable to allocate bus resource: memory\n");
2534 return (ENXIO);
2535 }
2536 adapter->osdep.mem_bus_space_tag =
2537 rman_get_bustag(adapter->memory);
2538 adapter->osdep.mem_bus_space_handle =
2539 rman_get_bushandle(adapter->memory);
2540 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2541
1743
1744 rid = PCIR_BAR(0);
1745 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1746 &rid, RF_ACTIVE);
1747 if (adapter->memory == NULL) {
1748 device_printf(dev, "Unable to allocate bus resource: memory\n");
1749 return (ENXIO);
1750 }
1751 adapter->osdep.mem_bus_space_tag =
1752 rman_get_bustag(adapter->memory);
1753 adapter->osdep.mem_bus_space_handle =
1754 rman_get_bushandle(adapter->memory);
1755 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1756
1757 /* Only older adapters use IO mapping */
1758 if (adapter->hw.mac.type < em_mac_min &&
1759 adapter->hw.mac.type > e1000_82543) {
1760		/* Figure out where our IO BAR is */
1761 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1762 val = pci_read_config(dev, rid, 4);
1763 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1764 adapter->io_rid = rid;
1765 break;
1766 }
1767 rid += 4;
1768 /* check for 64bit BAR */
1769 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
1770 rid += 4;
1771 }
1772 if (rid >= PCIR_CIS) {
1773 device_printf(dev, "Unable to locate IO BAR\n");
1774 return (ENXIO);
1775 }
1776 adapter->ioport = bus_alloc_resource_any(dev,
1777 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
1778 if (adapter->ioport == NULL) {
1779 device_printf(dev, "Unable to allocate bus resource: "
1780 "ioport\n");
1781 return (ENXIO);
1782 }
1783 adapter->hw.io_base = 0;
1784 adapter->osdep.io_bus_space_tag =
1785 rman_get_bustag(adapter->ioport);
1786 adapter->osdep.io_bus_space_handle =
1787 rman_get_bushandle(adapter->ioport);
1788 }
1789
2542 adapter->hw.back = &adapter->osdep;
2543
2544 return (0);
2545}
2546
2547/*********************************************************************
2548 *
1790 adapter->hw.back = &adapter->osdep;
1791
1792 return (0);
1793}
1794
1795/*********************************************************************
1796 *
2549 * Setup the Legacy or MSI Interrupt handler
2550 *
2551 **********************************************************************/
2552int
2553em_allocate_legacy(struct adapter *adapter)
2554{
2555 device_t dev = adapter->dev;
2556 struct tx_ring *txr = adapter->tx_rings;
2557 int error, rid = 0;
2558
2559 /* Manually turn off all interrupts */
2560 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2561
2562 if (adapter->msix == 1) /* using MSI */
2563 rid = 1;
2564 /* We allocate a single interrupt resource */
2565 adapter->res = bus_alloc_resource_any(dev,
2566 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2567 if (adapter->res == NULL) {
2568 device_printf(dev, "Unable to allocate bus resource: "
2569 "interrupt\n");
2570 return (ENXIO);
2571 }
2572
2573 /*
2574 * Allocate a fast interrupt and the associated
2575 * deferred processing contexts.
2576 */
2577 TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter);
2578 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2579 taskqueue_thread_enqueue, &adapter->tq);
2580 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s que",
2581 device_get_nameunit(adapter->dev));
2582 /* Use a TX only tasklet for local timer */
2583 TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
2584 txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
2585 taskqueue_thread_enqueue, &txr->tq);
2586 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2587 device_get_nameunit(adapter->dev));
2588 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2589 if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET,
2590 em_irq_fast, NULL, adapter, &adapter->tag)) != 0) {
2591 device_printf(dev, "Failed to register fast interrupt "
2592 "handler: %d\n", error);
2593 taskqueue_free(adapter->tq);
2594 adapter->tq = NULL;
2595 return (error);
2596 }
2597
2598 return (0);
2599}
2600
2601/*********************************************************************
2602 *
2603 * Setup the MSIX Interrupt handlers
1797 * Setup the MSIX Interrupt handlers
2604 * This is not really multiqueue; rather,
2605 * it is just separate interrupt vectors
2606 * for TX, RX, and Link.
2607 *
2608 **********************************************************************/
1798 *
1799 **********************************************************************/
2609int
2610em_allocate_msix(struct adapter *adapter)
2611{
2612	device_t dev = adapter->dev;
2613	struct tx_ring *txr = adapter->tx_rings;
2614	struct rx_ring *rxr = adapter->rx_rings;
2615	int error, rid, vector = 0;
2616	int cpu_id = 0;
2617
2618
2619	/* Make sure all interrupts are disabled */
2620	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2621
2622	/* First set up ring resources */
1800static int
1801em_if_msix_intr_assign(if_ctx_t ctx, int msix)
1802{
1803	struct adapter *adapter = iflib_get_softc(ctx);
1804	struct em_rx_queue *rx_que = adapter->rx_queues;
1805	struct em_tx_queue *tx_que = adapter->tx_queues;
1806	int error, rid, i, vector = 0;
1807	char buf[16];
1808
1809	/* First set up ring resources */
2623 for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) {
2624
2625 /* RX ring */
2626 rid = vector + 1;
2627
2628 rxr->res = bus_alloc_resource_any(dev,
2629 SYS_RES_IRQ, &rid, RF_ACTIVE);
2630 if (rxr->res == NULL) {
2631 device_printf(dev,
2632 "Unable to allocate bus resource: "
2633 "RX MSIX Interrupt %d\n", i);
2634 return (ENXIO);
1810 for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
1811 rid = vector +1;
1812 snprintf(buf, sizeof(buf), "rxq%d", i);
1813 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, em_msix_que, rx_que, rx_que->me, buf);
1814 if (error) {
1815 device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
1816 adapter->rx_num_queues = i + 1;
1817 goto fail;
2635 }
1818 }
2636 if ((error = bus_setup_intr(dev, rxr->res,
2637 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx,
2638 rxr, &rxr->tag)) != 0) {
2639 device_printf(dev, "Failed to register RX handler");
2640 return (error);
2641 }
2642#if __FreeBSD_version >= 800504
2643 bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i);
2644#endif
2645 rxr->msix = vector;
2646
1819
2647 if (em_last_bind_cpu < 0)
2648 em_last_bind_cpu = CPU_FIRST();
2649 cpu_id = em_last_bind_cpu;
2650 bus_bind_intr(dev, rxr->res, cpu_id);
2651
2652 TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr);
2653 rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT,
2654 taskqueue_thread_enqueue, &rxr->tq);
2655 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)",
2656 device_get_nameunit(adapter->dev), cpu_id);
1820 rx_que->msix = vector;
1821
2657 /*
2658 ** Set the bit to enable interrupt
2659 ** in E1000_IMS -- bits 20 and 21
2660 ** are for RX0 and RX1, note this has
2661 ** NOTHING to do with the MSIX vector
2662 */
1822 /*
1823 ** Set the bit to enable interrupt
1824 ** in E1000_IMS -- bits 20 and 21
1825 ** are for RX0 and RX1, note this has
1826 ** NOTHING to do with the MSIX vector
1827 */
2663 rxr->ims = 1 << (20 + i);
2664 adapter->ims |= rxr->ims;
2665 adapter->ivars |= (8 | rxr->msix) << (i * 4);
2666
2667 em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu);
1828 if (adapter->hw.mac.type == e1000_82574) {
1829 rx_que->eims = 1 << (20 + i);
1830 adapter->ims |= rx_que->eims;
1831 adapter->ivars |= (8 | rx_que->msix) << (i * 4);
1832 } else if (adapter->hw.mac.type == e1000_82575)
1833 rx_que->eims = E1000_EICR_TX_QUEUE0 << vector;
1834 else
1835 rx_que->eims = 1 << vector;
2668 }
2669
1836 }
1837
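/*
 * Worked example (illustrative, 82574 with two RX queues on MSI-X
 * vectors 0 and 1): the RX loop above produces
 *
 *	ims   |= (1 << 20) | (1 << 21)		-> 0x00300000
 *	ivars |= (8 | 0) << 0 | (8 | 1) << 4	-> 0x00000098
 *
 * i.e. each 4-bit IVAR nibble holds the vector number with bit 3 set
 * to mark it valid, while the IMS bits are fixed per queue and, as the
 * comment says, unrelated to the vector numbers.
 */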
2670 for (int i = 0; i < adapter->num_queues; i++, txr++, vector++) {
2671 /* TX ring */
1838 for (i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
2672 rid = vector + 1;
1839 rid = vector + 1;
2673 txr->res = bus_alloc_resource_any(dev,
2674 SYS_RES_IRQ, &rid, RF_ACTIVE);
2675 if (txr->res == NULL) {
2676 device_printf(dev,
2677 "Unable to allocate bus resource: "
2678 "TX MSIX Interrupt %d\n", i);
2679 return (ENXIO);
2680 }
2681 if ((error = bus_setup_intr(dev, txr->res,
2682 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx,
2683 txr, &txr->tag)) != 0) {
2684 device_printf(dev, "Failed to register TX handler");
2685 return (error);
2686 }
2687#if __FreeBSD_version >= 800504
2688 bus_describe_intr(dev, txr->res, txr->tag, "tx%d", i);
2689#endif
2690 txr->msix = vector;
1840 snprintf(buf, sizeof(buf), "txq%d", i);
1841 tx_que = &adapter->tx_queues[i];
1842 iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->me, buf);
2691
1843
2692 if (em_last_bind_cpu < 0)
2693 em_last_bind_cpu = CPU_FIRST();
2694 cpu_id = em_last_bind_cpu;
2695 bus_bind_intr(dev, txr->res, cpu_id);
1844 tx_que->msix = vector;
2696
1845
2697 TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr);
2698 txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT,
2699 taskqueue_thread_enqueue, &txr->tq);
2700 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq (cpuid %d)",
2701 device_get_nameunit(adapter->dev), cpu_id);
2702 /*
1846 /*
2703 ** Set the bit to enable interrupt
2704 ** in E1000_IMS -- bits 22 and 23
2705 ** are for TX0 and TX1, note this has
2706 ** NOTHING to do with the MSIX vector
2707 */
1847 ** Set the bit to enable interrupt
1848 ** in E1000_IMS -- bits 22 and 23
1849 ** are for TX0 and TX1, note this has
1850 ** NOTHING to do with the MSIX vector
1851 */
2708 txr->ims = 1 << (22 + i);
2709 adapter->ims |= txr->ims;
2710 adapter->ivars |= (8 | txr->msix) << (8 + (i * 4));
2711
2712 em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu);
1852 if (adapter->hw.mac.type < igb_mac_min) {
1853 tx_que->eims = 1 << (22 + i);
1854 adapter->ims |= tx_que->eims;
1855 adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
1856		} else if (adapter->hw.mac.type == e1000_82575)
1857 tx_que->eims = E1000_EICR_TX_QUEUE0 << (i % adapter->tx_num_queues);
1858 else
1859 tx_que->eims = 1 << (i % adapter->tx_num_queues);
2713 }
1860 }
2714
1861
2715 /* Link interrupt */
2716 rid = vector + 1;
1862 /* Link interrupt */
1863 rid = vector + 1;
2717 adapter->res = bus_alloc_resource_any(dev,
2718 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2719 if (!adapter->res) {
2720 device_printf(dev,"Unable to allocate "
2721 "bus resource: Link interrupt [%d]\n", rid);
2722 return (ENXIO);
2723 }
2724 /* Set the link handler function */
2725 error = bus_setup_intr(dev, adapter->res,
2726 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2727 em_msix_link, adapter, &adapter->tag);
1864 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
1865
2728 if (error) {
1866 if (error) {
2729 adapter->res = NULL;
2730 device_printf(dev, "Failed to register LINK handler");
2731 return (error);
1867 device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
1868 goto fail;
2732 }
1869 }
2733#if __FreeBSD_version >= 800504
2734 bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2735#endif
2736 adapter->linkvec = vector;
1870 adapter->linkvec = vector;
2737 adapter->ivars |= (8 | vector) << 16;
2738 adapter->ivars |= 0x80000000;
2739
1871 if (adapter->hw.mac.type < igb_mac_min) {
1872 adapter->ivars |= (8 | vector) << 16;
1873 adapter->ivars |= 0x80000000;
1874 }
2740 return (0);
1875 return (0);
1876 fail:
1877 iflib_irq_free(ctx, &adapter->irq);
1878 rx_que = adapter->rx_queues;
1879 for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
1880 iflib_irq_free(ctx, &rx_que->que_irq);
1881 return (error);
2741}
2742
1882}
1883
2743
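/*
 * Worked example (illustrative) of the final IVAR value built above on
 * the legacy 82574 path with a single queue pair: RX on vector 0, TX on
 * vector 1, link on vector 2:
 *
 *	rx:   ivars |= (8 | 0) << 0	-> 0x00000008
 *	tx:   ivars |= (8 | 1) << 8	-> 0x00000900
 *	link: ivars |= (8 | 2) << 16	-> 0x000A0000
 *	      ivars |= 0x80000000	(bit 31, set as above)
 *
 * giving a final E1000_IVAR value of 0x800A0908.
 */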
2744static void
1884static void
2745em_free_pci_resources(struct adapter *adapter)
1885igb_configure_queues(struct adapter *adapter)
2746{
1886{
2747 device_t dev = adapter->dev;
2748 struct tx_ring *txr;
2749 struct rx_ring *rxr;
2750 int rid;
1887 struct e1000_hw *hw = &adapter->hw;
1888 struct em_rx_queue *rx_que;
1889 struct em_tx_queue *tx_que;
1890 u32 tmp, ivar = 0, newitr = 0;
2751
1891
1892 /* First turn on RSS capability */
1893 if (adapter->hw.mac.type != e1000_82575)
1894 E1000_WRITE_REG(hw, E1000_GPIE,
1895 E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
1896 E1000_GPIE_PBA | E1000_GPIE_NSICR);
2752
1897
2753 /*
2754 ** Release all the queue interrupt resources:
2755 */
2756 for (int i = 0; i < adapter->num_queues; i++) {
2757 txr = &adapter->tx_rings[i];
2758 /* an early abort? */
2759 if (txr == NULL)
2760 break;
2761 rid = txr->msix +1;
2762 if (txr->tag != NULL) {
2763 bus_teardown_intr(dev, txr->res, txr->tag);
2764 txr->tag = NULL;
1898 /* Turn on MSIX */
1899 switch (adapter->hw.mac.type) {
1900 case e1000_82580:
1901 case e1000_i350:
1902 case e1000_i354:
1903 case e1000_i210:
1904 case e1000_i211:
1905 case e1000_vfadapt:
1906 case e1000_vfadapt_i350:
1907 /* RX entries */
1908 for (int i = 0; i < adapter->rx_num_queues; i++) {
1909 u32 index = i >> 1;
1910 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
1911 rx_que = &adapter->rx_queues[i];
1912 if (i & 1) {
1913 ivar &= 0xFF00FFFF;
1914 ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
1915 } else {
1916 ivar &= 0xFFFFFF00;
1917 ivar |= rx_que->msix | E1000_IVAR_VALID;
1918 }
1919 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
2765 }
1920 }
2766 if (txr->res != NULL)
2767 bus_release_resource(dev, SYS_RES_IRQ,
2768 rid, txr->res);
1921 /* TX entries */
1922 for (int i = 0; i < adapter->tx_num_queues; i++) {
1923 u32 index = i >> 1;
1924 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
1925 tx_que = &adapter->tx_queues[i];
1926 if (i & 1) {
1927 ivar &= 0x00FFFFFF;
1928 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
1929 } else {
1930 ivar &= 0xFFFF00FF;
1931 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
1932 }
1933 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
1934 adapter->que_mask |= tx_que->eims;
1935 }
2769
1936
2770 rxr = &adapter->rx_rings[i];
2771 /* an early abort? */
2772 if (rxr == NULL)
2773 break;
2774 rid = rxr->msix +1;
2775 if (rxr->tag != NULL) {
2776 bus_teardown_intr(dev, rxr->res, rxr->tag);
2777 rxr->tag = NULL;
1937 /* And for the link interrupt */
1938 ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
1939 adapter->link_mask = 1 << adapter->linkvec;
1940 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
1941 break;
1942 case e1000_82576:
1943 /* RX entries */
1944 for (int i = 0; i < adapter->rx_num_queues; i++) {
1945 u32 index = i & 0x7; /* Each IVAR has two entries */
1946 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
1947 rx_que = &adapter->rx_queues[i];
1948 if (i < 8) {
1949 ivar &= 0xFFFFFF00;
1950 ivar |= rx_que->msix | E1000_IVAR_VALID;
1951 } else {
1952 ivar &= 0xFF00FFFF;
1953 ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
1954 }
1955 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
1956 adapter->que_mask |= rx_que->eims;
2778 }
1957 }
2779 if (rxr->res != NULL)
2780 bus_release_resource(dev, SYS_RES_IRQ,
2781 rid, rxr->res);
2782 }
1958 /* TX entries */
1959 for (int i = 0; i < adapter->tx_num_queues; i++) {
1960 u32 index = i & 0x7; /* Each IVAR has two entries */
1961 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
1962 tx_que = &adapter->tx_queues[i];
1963 if (i < 8) {
1964 ivar &= 0xFFFF00FF;
1965 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
1966 } else {
1967 ivar &= 0x00FFFFFF;
1968 ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
1969 }
1970 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
1971 adapter->que_mask |= tx_que->eims;
1972 }
2783
1973
2784 if (adapter->linkvec) /* we are doing MSIX */
2785 rid = adapter->linkvec + 1;
2786 else
2787 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1974 /* And for the link interrupt */
1975 ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
1976 adapter->link_mask = 1 << adapter->linkvec;
1977 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
1978 break;
2788
1979
2789 if (adapter->tag != NULL) {
2790 bus_teardown_intr(dev, adapter->res, adapter->tag);
2791 adapter->tag = NULL;
2792 }
1980 case e1000_82575:
1981		/* enable MSI-X support */
1982 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
1983 tmp |= E1000_CTRL_EXT_PBA_CLR;
1984 /* Auto-Mask interrupts upon ICR read. */
1985 tmp |= E1000_CTRL_EXT_EIAME;
1986 tmp |= E1000_CTRL_EXT_IRCA;
1987 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
2793
1988
2794 if (adapter->res != NULL)
2795 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1989 /* Queues */
1990 for (int i = 0; i < adapter->rx_num_queues; i++) {
1991 rx_que = &adapter->rx_queues[i];
1992 tmp = E1000_EICR_RX_QUEUE0 << i;
1993 tmp |= E1000_EICR_TX_QUEUE0 << i;
1994 rx_que->eims = tmp;
1995 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
1996 i, rx_que->eims);
1997 adapter->que_mask |= rx_que->eims;
1998 }
2796
1999
2000 /* Link */
2001 E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
2002 E1000_EIMS_OTHER);
2003 adapter->link_mask |= E1000_EIMS_OTHER;
2004 default:
2005 break;
2006 }
2797
2007
2798 if (adapter->msix)
2799 pci_release_msi(dev);
2008 /* Set the starting interrupt rate */
2009 if (em_max_interrupt_rate > 0)
2010 newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC;
2800
2011
2801 if (adapter->msix_mem != NULL)
2802 bus_release_resource(dev, SYS_RES_MEMORY,
2803 PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
2012 if (hw->mac.type == e1000_82575)
2013 newitr |= newitr << 16;
2014 else
2015 newitr |= E1000_EITR_CNT_IGNR;
2804
2016
2805 if (adapter->memory != NULL)
2806 bus_release_resource(dev, SYS_RES_MEMORY,
2807 PCIR_BAR(0), adapter->memory);
2017 for (int i = 0; i < adapter->rx_num_queues; i++) {
2018 rx_que = &adapter->rx_queues[i];
2019 E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr);
2020 }
2808
2021
2809 if (adapter->flash != NULL)
2810 bus_release_resource(dev, SYS_RES_MEMORY,
2811 EM_FLASH, adapter->flash);
2022 return;
2812}
2813
2023}
2024
2814/*
2815 * Setup MSI or MSI/X
2816 */
2817static int
2818em_setup_msix(struct adapter *adapter)
2025static void
2026em_free_pci_resources(if_ctx_t ctx)
2819{
2027{
2820 device_t dev = adapter->dev;
2821 int val;
2028 struct adapter *adapter = iflib_get_softc(ctx);
2029 struct em_rx_queue *que = adapter->rx_queues;
2030 device_t dev = iflib_get_dev(ctx);
2822
2031
2823 /* Nearly always going to use one queue */
2824 adapter->num_queues = 1;
2032 /* Release all msix queue resources */
2033 if (adapter->intr_type == IFLIB_INTR_MSIX)
2034 iflib_irq_free(ctx, &adapter->irq);
2825
2035
2826 /*
2827 ** Try using MSI-X for Hartwell adapters
2828 */
2829 if ((adapter->hw.mac.type == e1000_82574) &&
2830 (em_enable_msix == TRUE)) {
2831#ifdef EM_MULTIQUEUE
2832 adapter->num_queues = (em_num_queues == 1) ? 1 : 2;
2833 if (adapter->num_queues > 1)
2834 em_enable_vectors_82574(adapter);
2835#endif
2836 /* Map the MSIX BAR */
2837 int rid = PCIR_BAR(EM_MSIX_BAR);
2838 adapter->msix_mem = bus_alloc_resource_any(dev,
2839 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2840 if (adapter->msix_mem == NULL) {
2841 /* May not be enabled */
2842 device_printf(adapter->dev,
2843 "Unable to map MSIX table \n");
2844 goto msi;
2845 }
2846 val = pci_msix_count(dev);
2036 for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
2037 iflib_irq_free(ctx, &que->que_irq);
2038 }
2847
2039
2848#ifdef EM_MULTIQUEUE
2849 /* We need 5 vectors in the multiqueue case */
2850 if (adapter->num_queues > 1 ) {
2851 if (val >= 5)
2852 val = 5;
2853 else {
2854 adapter->num_queues = 1;
2855 device_printf(adapter->dev,
2856 "Insufficient MSIX vectors for >1 queue, "
2857 "using single queue...\n");
2858 goto msix_one;
2859 }
2860 } else {
2861msix_one:
2862#endif
2863 if (val >= 3)
2864 val = 3;
2865 else {
2866 device_printf(adapter->dev,
2867 "Insufficient MSIX vectors, using MSI\n");
2868 goto msi;
2869 }
2870#ifdef EM_MULTIQUEUE
2871 }
2872#endif
2873
2040
2874 if ((pci_alloc_msix(dev, &val) == 0)) {
2875 device_printf(adapter->dev,
2876 "Using MSIX interrupts "
2877 "with %d vectors\n", val);
2878 return (val);
2879 }
2880
2881 /*
2882 ** If MSIX alloc failed or provided us with
2883 ** less than needed, free and fall through to MSI
2884 */
2885 pci_release_msi(dev);
2041 /* First release all the interrupt resources */
2042 if (adapter->memory != NULL) {
2043 bus_release_resource(dev, SYS_RES_MEMORY,
2044 PCIR_BAR(0), adapter->memory);
2045 adapter->memory = NULL;
2886 }
2046 }
2887msi:
2888 if (adapter->msix_mem != NULL) {
2047
2048 if (adapter->flash != NULL) {
2889 bus_release_resource(dev, SYS_RES_MEMORY,
2049 bus_release_resource(dev, SYS_RES_MEMORY,
2890 PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem);
2891 adapter->msix_mem = NULL;
2050 EM_FLASH, adapter->flash);
2051 adapter->flash = NULL;
2892 }
2052 }
2893 val = 1;
2894 if (pci_alloc_msi(dev, &val) == 0) {
2895 device_printf(adapter->dev, "Using an MSI interrupt\n");
2896 return (val);
2897 }
2898 /* Should only happen due to manual configuration */
2899 device_printf(adapter->dev,"No MSI/MSIX using a Legacy IRQ\n");
2900 return (0);
2053 if (adapter->ioport != NULL)
2054 bus_release_resource(dev, SYS_RES_IOPORT,
2055 adapter->io_rid, adapter->ioport);
2901}
2902
2056}
2057
2903
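/*
 * Vector budgeting note (illustrative): the legacy em_setup_msix()
 * above needs one vector per RX queue, one per TX queue, plus one for
 * link, hence the magic numbers: 5 vectors for the two-queue 82574
 * multiqueue case (2 RX + 2 TX + link) and 3 for the single-queue case
 * (1 RX + 1 TX + link).
 */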
2904/*
2905** The 3 following flush routines are used as a workaround in the
2906** I219 client parts and only for them.
2907**
2908** em_flush_tx_ring - remove all descriptors from the tx_ring
2909**
2910** We want to clear all pending descriptors from the TX ring.
2911** Zeroing happens when the HW reads the regs. We assign the ring itself as
2912** the data of the next descriptor. We don't care about the data since we
2913** are about to reset the HW.
2914*/
2915static void
2916em_flush_tx_ring(struct adapter *adapter)
2058/* Setup MSI or MSI/X */
2059static int
2060em_setup_msix(if_ctx_t ctx)
2917{
2061{
2918 struct e1000_hw *hw = &adapter->hw;
2919 struct tx_ring *txr = adapter->tx_rings;
2920 struct e1000_tx_desc *txd;
2921 u32 tctl, txd_lower = E1000_TXD_CMD_IFCS;
2922 u16 size = 512;
2062 struct adapter *adapter = iflib_get_softc(ctx);
2923
2063
2924 tctl = E1000_READ_REG(hw, E1000_TCTL);
2925 E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
2926
2927 txd = &txr->tx_base[txr->next_avail_desc++];
2928 if (txr->next_avail_desc == adapter->num_tx_desc)
2929 txr->next_avail_desc = 0;
2930
2931 /* Just use the ring as a dummy buffer addr */
2932 txd->buffer_addr = txr->txdma.dma_paddr;
2933 txd->lower.data = htole32(txd_lower | size);
2934 txd->upper.data = 0;
2935
2936 /* flush descriptors to memory before notifying the HW */
2937 wmb();
2938
2939 E1000_WRITE_REG(hw, E1000_TDT(0), txr->next_avail_desc);
2940 mb();
2941 usec_delay(250);
2064 if (adapter->hw.mac.type == e1000_82574) {
2065 em_enable_vectors_82574(ctx);
2066 }
2067 return (0);
2942}
2943
2068}
2069
2944/*
2945** em_flush_rx_ring - remove all descriptors from the rx_ring
2946**
2947** Mark all descriptors in the RX ring as consumed and disable the rx ring
2948*/
2949static void
2950em_flush_rx_ring(struct adapter *adapter)
2951{
2952 struct e1000_hw *hw = &adapter->hw;
2953 u32 rctl, rxdctl;
2070/*********************************************************************
2071 *
2072 * Initialize the hardware to a configuration
2073 * as specified by the adapter structure.
2074 *
2075 **********************************************************************/
2954
2076
2955 rctl = E1000_READ_REG(hw, E1000_RCTL);
2956 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2957 E1000_WRITE_FLUSH(hw);
2958 usec_delay(150);
2959
2960 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
2961 /* zero the lower 14 bits (prefetch and host thresholds) */
2962 rxdctl &= 0xffffc000;
2963 /*
2964 * update thresholds: prefetch threshold to 31, host threshold to 1
2965 * and make sure the granularity is "descriptors" and not "cache lines"
2966 */
2967 rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
2968 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl);
2969
2970 /* momentarily enable the RX ring for the changes to take effect */
2971 E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
2972 E1000_WRITE_FLUSH(hw);
2973 usec_delay(150);
2974 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2975}
2976
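/*
 * Illustrative restatement of the RXDCTL write above: with the low 14
 * bits cleared by the 0xffffc000 mask, the fields are reprogrammed as
 *
 *	0x1F		-> prefetch threshold = 31
 *	1 << 8		-> host threshold = 1
 *	E1000_RXDCTL_THRESH_UNIT_DESC
 *			-> thresholds counted in descriptors,
 *			   not cache lines
 *
 * (field positions follow the mask and comments above).
 */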
2977/*
2978** em_flush_desc_rings - remove all descriptors from the descriptor rings
2979**
2980** In i219, the descriptor rings must be emptied before resetting the HW
2981** or before changing the device state to D3 during runtime (runtime PM).
2982**
2983** Failure to do this will cause the HW to enter a unit hang state which can
2984** only be released by a PCI reset of the device.
2985**
2986*/
2987static void
2077static void
2988em_flush_desc_rings(struct adapter *adapter)
2078lem_smartspeed(struct adapter *adapter)
2989{
2079{
2990 struct e1000_hw *hw = &adapter->hw;
2991 device_t dev = adapter->dev;
2992 u16 hang_state;
2993 u32 fext_nvm11, tdlen;
2994
2995 /* First, disable MULR fix in FEXTNVM11 */
2996 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
2997 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
2998 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
2999
3000	/* do nothing if we're not in a faulty state, or if the queue is empty */
3001 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
3002 hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
3003 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
2080 u16 phy_tmp;
2081
2082 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
2083 adapter->hw.mac.autoneg == 0 ||
2084 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3004 return;
2085 return;
3005 em_flush_tx_ring(adapter);
3006
2086
3007 /* recheck, maybe the fault is caused by the rx ring */
3008 hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
3009 if (hang_state & FLUSH_DESC_REQUIRED)
3010 em_flush_rx_ring(adapter);
2087 if (adapter->smartspeed == 0) {
2088 /* If Master/Slave config fault is asserted twice,
2089 * we assume back-to-back */
2090 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2091 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2092 return;
2093 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2094 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2095 e1000_read_phy_reg(&adapter->hw,
2096 PHY_1000T_CTRL, &phy_tmp);
2097 if(phy_tmp & CR_1000T_MS_ENABLE) {
2098 phy_tmp &= ~CR_1000T_MS_ENABLE;
2099 e1000_write_phy_reg(&adapter->hw,
2100 PHY_1000T_CTRL, phy_tmp);
2101 adapter->smartspeed++;
2102 if(adapter->hw.mac.autoneg &&
2103 !e1000_copper_link_autoneg(&adapter->hw) &&
2104 !e1000_read_phy_reg(&adapter->hw,
2105 PHY_CONTROL, &phy_tmp)) {
2106 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2107 MII_CR_RESTART_AUTO_NEG);
2108 e1000_write_phy_reg(&adapter->hw,
2109 PHY_CONTROL, phy_tmp);
2110 }
2111 }
2112 }
2113 return;
2114 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2115 /* If still no link, perhaps using 2/3 pair cable */
2116 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2117 phy_tmp |= CR_1000T_MS_ENABLE;
2118 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2119 if(adapter->hw.mac.autoneg &&
2120 !e1000_copper_link_autoneg(&adapter->hw) &&
2121 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2122 phy_tmp |= (MII_CR_AUTO_NEG_EN |
2123 MII_CR_RESTART_AUTO_NEG);
2124 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2125 }
2126 }
2127 /* Restart process after EM_SMARTSPEED_MAX iterations */
2128 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2129 adapter->smartspeed = 0;
3011}
3012
3013
2130}
2131
2132
3014/*********************************************************************
3015 *
3016 * Initialize the hardware to a configuration
3017 * as specified by the adapter structure.
3018 *
3019 **********************************************************************/
3020static void
3021em_reset(struct adapter *adapter)
3022{
3023	device_t dev = adapter->dev;
3024	if_t ifp = adapter->ifp;
3025	struct e1000_hw *hw = &adapter->hw;
3026	u16 rx_buffer_size;
3027	u32 pba;
3028
2133static void
2134em_reset(if_ctx_t ctx)
2135{
2136	device_t dev = iflib_get_dev(ctx);
2137	struct adapter *adapter = iflib_get_softc(ctx);
2138	struct ifnet *ifp = iflib_get_ifp(ctx);
3029 INIT_DEBUGOUT("em_reset: begin");
3030
3031 /* Set up smart power down as default off on newer adapters. */
3032 if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||

--- 118 unchanged lines hidden ---

3151 }
3152 /* else fall thru */
3153 default:
3154 if (hw->mac.type == e1000_80003es2lan)
3155 hw->fc.pause_time = 0xFFFF;
3156 break;
3157 }
3158
2139 struct e1000_hw *hw = &adapter->hw;
2140 u16 rx_buffer_size;
2141 u32 pba;
2142
2143 INIT_DEBUGOUT("em_reset: begin");
2144
2145 /* Set up smart power down as default off on newer adapters. */
2146 if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 ||

--- 118 unchanged lines hidden ---

2265 }
2266 /* else fall thru */
2267 default:
2268 if (hw->mac.type == e1000_80003es2lan)
2269 hw->fc.pause_time = 0xFFFF;
2270 break;
2271 }
2272
3159 /* I219 needs some special flushing to avoid hangs */
3160 if (hw->mac.type == e1000_pch_spt)
3161 em_flush_desc_rings(adapter);
3162
3163 /* Issue a global reset */
3164 e1000_reset_hw(hw);
3165 E1000_WRITE_REG(hw, E1000_WUC, 0);
3166 em_disable_aspm(adapter);
3167 /* and a re-init */
3168 if (e1000_init_hw(hw) < 0) {
3169 device_printf(dev, "Hardware Initialization Failed\n");
3170 return;
3171 }
3172
3173 E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
3174 e1000_get_phy_info(hw);
3175 e1000_check_for_link(hw);
2273 /* Issue a global reset */
2274 e1000_reset_hw(hw);
2275 E1000_WRITE_REG(hw, E1000_WUC, 0);
2276 em_disable_aspm(adapter);
2277 /* and a re-init */
2278 if (e1000_init_hw(hw) < 0) {
2279 device_printf(dev, "Hardware Initialization Failed\n");
2280 return;
2281 }
2282
2283 E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
2284 e1000_get_phy_info(hw);
2285 e1000_check_for_link(hw);
3176 return;
3177}
3178
2286}
2287
2288#define RSSKEYLEN 10
2289static void
2290em_initialize_rss_mapping(struct adapter *adapter)
2291{
2292 uint8_t rss_key[4 * RSSKEYLEN];
2293 uint32_t reta = 0;
2294 struct e1000_hw *hw = &adapter->hw;
2295 int i;
2296
2297 /*
2298 * Configure RSS key
2299 */
2300 arc4rand(rss_key, sizeof(rss_key), 0);
2301 for (i = 0; i < RSSKEYLEN; ++i) {
2302 uint32_t rssrk = 0;
2303
2304 rssrk = EM_RSSRK_VAL(rss_key, i);
2305 E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk);
2306 }
2307
2308 /*
2309 * Configure RSS redirect table in following fashion:
2310 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2311 */
2312 for (i = 0; i < sizeof(reta); ++i) {
2313 uint32_t q;
2314
2315 q = (i % adapter->rx_num_queues) << 7;
2316 reta |= q << (8 * i);
2317 }
2318
2319 for (i = 0; i < 32; ++i)
2320 E1000_WRITE_REG(hw, E1000_RETA(i), reta);
2321
2322 E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q |
2323 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2324 E1000_MRQC_RSS_FIELD_IPV4 |
2325 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX |
2326 E1000_MRQC_RSS_FIELD_IPV6_EX |
2327 E1000_MRQC_RSS_FIELD_IPV6);
2328
2329}
2330
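/*
 * Worked example (illustrative) of the RETA computation above with
 * rx_num_queues = 2: q alternates 0x00 / 0x80 per byte position, so
 *
 *	reta = 0x00 | (0x80 << 8) | (0x00 << 16) | (0x80 << 24)
 *	     = 0x80008000
 *
 * and this 4-byte pattern is written to all 32 E1000_RETA registers,
 * steering alternate hash buckets to queue 1 (the queue number is
 * placed at bit 7 of each byte, per the << 7 above).
 */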
2331static void
2332igb_initialize_rss_mapping(struct adapter *adapter)
2333{
2334 struct e1000_hw *hw = &adapter->hw;
2335 int i;
2336 int queue_id;
2337 u32 reta;
2338 u32 rss_key[10], mrqc, shift = 0;
2339
2340 /* XXX? */
2341 if (adapter->hw.mac.type == e1000_82575)
2342 shift = 6;
2343
2344 /*
2345 * The redirection table controls which destination
2346 * queue each bucket redirects traffic to.
2347 * Each DWORD represents four queues, with the LSB
2348 * being the first queue in the DWORD.
2349 *
2350 * This just allocates buckets to queues using round-robin
2351 * allocation.
2352 *
2353 * NOTE: It Just Happens to line up with the default
2354 * RSS allocation method.
2355 */
2356
2357 /* Warning FM follows */
2358 reta = 0;
2359 for (i = 0; i < 128; i++) {
2360#ifdef RSS
2361 queue_id = rss_get_indirection_to_bucket(i);
2362 /*
2363 * If we have more queues than buckets, we'll
2364 * end up mapping buckets to a subset of the
2365 * queues.
2366 *
2367 * If we have more buckets than queues, we'll
2368 * end up instead assigning multiple buckets
2369 * to queues.
2370 *
2371 * Both are suboptimal, but we need to handle
2372 * the case so we don't go out of bounds
2373 * indexing arrays and such.
2374 */
2375 queue_id = queue_id % adapter->rx_num_queues;
2376#else
2377 queue_id = (i % adapter->rx_num_queues);
2378#endif
2379 /* Adjust if required */
2380 queue_id = queue_id << shift;
2381
2382 /*
2383 * The low 8 bits are for hash value (n+0);
2384 * The next 8 bits are for hash value (n+1), etc.
2385 */
2386 reta = reta >> 8;
2387 reta = reta | ( ((uint32_t) queue_id) << 24);
2388 if ((i & 3) == 3) {
2389 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2390 reta = 0;
2391 }
2392 }
2393
2394 /* Now fill in hash table */
2395
2396 /*
2397 * MRQC: Multiple Receive Queues Command
2398 * Set queuing to RSS control, number depends on the device.
2399 */
2400 mrqc = E1000_MRQC_ENABLE_RSS_8Q;
2401
2402#ifdef RSS
2403 /* XXX ew typecasting */
2404 rss_getkey((uint8_t *) &rss_key);
2405#else
2406 arc4rand(&rss_key, sizeof(rss_key), 0);
2407#endif
2408 for (i = 0; i < 10; i++)
2409 E1000_WRITE_REG_ARRAY(hw,
2410 E1000_RSSRK(0), i, rss_key[i]);
2411
2412 /*
2413 * Configure the RSS fields to hash upon.
2414 */
2415 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2416 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2417 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2418 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2419 mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
2420 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2421 mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2422 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2423
2424 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2425}
2426
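/*
 * Worked example (illustrative) of the igb RETA loop above with four
 * queues and shift = 0: buckets 0..3 get queue ids 0,1,2,3.  Each pass
 * does reta >>= 8 and inserts the next id in the top byte, so after
 * i = 3:
 *
 *	reta = 0x03020100	(one queue id per byte, LSB = bucket 0)
 *
 * which is written to E1000_RETA(0); the round-robin pattern then
 * repeats for the remaining 31 registers.
 */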
3179/*********************************************************************
3180 *
3181 * Setup networking device structure and register an interface.
3182 *
3183 **********************************************************************/
3184static int
2427/*********************************************************************
2428 *
2429 * Setup networking device structure and register an interface.
2430 *
2431 **********************************************************************/
2432static int
3185em_setup_interface(device_t dev, struct adapter *adapter)
3186{
3187	if_t ifp;
3188
3189	INIT_DEBUGOUT("em_setup_interface: begin");
3190
2433em_setup_interface(if_ctx_t ctx)
2434{
2435	struct ifnet *ifp = iflib_get_ifp(ctx);
2436	struct adapter *adapter = iflib_get_softc(ctx);
2437	if_softc_ctx_t scctx = adapter->shared;
2438	uint64_t cap = 0;
2439
2440	INIT_DEBUGOUT("em_setup_interface: begin");
2441
3191 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
3192 if (ifp == 0) {
3193 device_printf(dev, "can not allocate ifnet structure\n");
3194 return (-1);
3195 }
3196 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3197 if_setdev(ifp, dev);
3198 if_setinitfn(ifp, em_init);
3199 if_setsoftc(ifp, adapter);
3200 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
3201 if_setioctlfn(ifp, em_ioctl);
3202 if_setgetcounterfn(ifp, em_get_counter);
3203
3204 /* TSO parameters */
3205 ifp->if_hw_tsomax = IP_MAXPACKET;
3206	/* Take m_pullup(9)'s in em_xmit() w/ TSO into account. */
3207 ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER - 5;
3208 ifp->if_hw_tsomaxsegsize = EM_TSO_SEG_SIZE;
3209
2442 /* TSO parameters */
2443 ifp->if_hw_tsomax = IP_MAXPACKET;
2444 /* Take m_pullup(9)'s in em_xmit() w/ TSO into acount. */
2445 ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER - 5;
2446 ifp->if_hw_tsomaxsegsize = EM_TSO_SEG_SIZE;
2447
3210#ifdef EM_MULTIQUEUE
3211 /* Multiqueue stack interface */
3212 if_settransmitfn(ifp, em_mq_start);
3213 if_setqflushfn(ifp, em_qflush);
3214#else
3215 if_setstartfn(ifp, em_start);
3216 if_setsendqlen(ifp, adapter->num_tx_desc - 1);
3217 if_setsendqready(ifp);
3218#endif
2448 /* Single Queue */
2449 if (adapter->tx_num_queues == 1) {
2450 if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
2451 if_setsendqready(ifp);
2452 }
3219
2453
3220 ether_ifattach(ifp, adapter->hw.mac.addr);
2454 cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4;
2455 cap |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
3221
2456
3222 if_setcapabilities(ifp, 0);
3223 if_setcapenable(ifp, 0);
3224
3225
3226 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
3227 IFCAP_TSO4, 0);
3228 /*
3229 * Tell the upper layer(s) we
3230 * support full VLAN capability
3231 */
3232 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2457 /*
2458 * Tell the upper layer(s) we
2459 * support full VLAN capability
2460 */
2461 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
3233 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
3234 IFCAP_VLAN_MTU, 0);
3235 if_setcapenable(ifp, if_getcapabilities(ifp));
2462 if_setcapabilitiesbit(ifp, cap, 0);
3236
3237	/*
3238	** Don't turn this on by default. If vlans are
3239	** created on another pseudo device (e.g. lagg),
3240	** vlan events are not passed through and operation
3241	** breaks, but with HW FILTER off it works. If
3242	** using vlans directly on the em driver you can
3243	** enable this and get full hardware tag filtering.
3244	*/
3245	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
3246
2463
2464	/*
2465	** Don't turn this on by default. If vlans are
2466	** created on another pseudo device (e.g. lagg),
2467	** vlan events are not passed through and operation
2468	** breaks, but with HW FILTER off it works. If
2469	** using vlans directly on the em driver you can
2470	** enable this and get full hardware tag filtering.
2471	*/
2472	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
2473
3247#ifdef DEVICE_POLLING
3248	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
3249#endif
3250
3251 /* Enable only WOL MAGIC by default */
3252 if (adapter->wol) {
3253 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
3254 if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0);
3255 }
3256
3257 /*
3258 * Specify the media types supported by this adapter and register
3259 * callbacks to update media and link information
3260 */
2474 /* Enable only WOL MAGIC by default */
2475 if (adapter->wol) {
2476 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
2477 if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0);
2478 }
2479
2480 /*
2481 * Specify the media types supported by this adapter and register
2482 * callbacks to update media and link information
2483 */
3261 ifmedia_init(&adapter->media, IFM_IMASK,
3262 em_media_change, em_media_status);
3263 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3264 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3265 u_char fiber_type = IFM_1000_SX; /* default type */
3266
2484 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2485 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
2486 u_char fiber_type = IFM_1000_SX; /* default type */
2487
3267 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3268 0, NULL);
3269 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2488 if (adapter->hw.mac.type == e1000_82545)
2489 fiber_type = IFM_1000_LX;
2490 ifmedia_add(adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL);
2491 ifmedia_add(adapter->media, IFM_ETHER | fiber_type, 0, NULL);
3270 } else {
2492 } else {
3271 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3272 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3273 0, NULL);
3274 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3275 0, NULL);
3276 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
3277 0, NULL);
2493 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2494 ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
2495 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2496 ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
3278 if (adapter->hw.phy.type != e1000_phy_ife) {
2497 if (adapter->hw.phy.type != e1000_phy_ife) {
3279 ifmedia_add(&adapter->media,
3280 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3281 ifmedia_add(&adapter->media,
3282 IFM_ETHER | IFM_1000_T, 0, NULL);
2498 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2499 ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
3283 }
3284 }
2500 }
2501 }
3285 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3286 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2502 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2503 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
3287 return (0);
3288}
3289
2504 return (0);
2505}
2506
3290
3291/*
3292 * Manage DMA'able memory.
3293 */
3294static void
3295em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3296{
3297 if (error)
3298 return;
3299 *(bus_addr_t *) arg = segs[0].ds_addr;
3300}
3301
3302static int
2507static int
3303em_dma_malloc(struct adapter *adapter, bus_size_t size,
3304 struct em_dma_alloc *dma, int mapflags)
2508em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
3305{
2509{
3306 int error;
2510 struct adapter *adapter = iflib_get_softc(ctx);
2511 if_softc_ctx_t scctx = adapter->shared;
2512 int error = E1000_SUCCESS;
2513 struct em_tx_queue *que;
2514 int i;
3307
2515
3308 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3309 EM_DBA_ALIGN, 0, /* alignment, bounds */
3310 BUS_SPACE_MAXADDR, /* lowaddr */
3311 BUS_SPACE_MAXADDR, /* highaddr */
3312 NULL, NULL, /* filter, filterarg */
3313 size, /* maxsize */
3314 1, /* nsegments */
3315 size, /* maxsegsize */
3316 0, /* flags */
3317 NULL, /* lockfunc */
3318 NULL, /* lockarg */
3319 &dma->dma_tag);
3320 if (error) {
3321 device_printf(adapter->dev,
3322 "%s: bus_dma_tag_create failed: %d\n",
3323 __func__, error);
3324 goto fail_0;
3325 }
2516 MPASS(adapter->tx_num_queues > 0);
2517 MPASS(adapter->tx_num_queues == ntxqsets);
3326
2518
3327 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3328 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3329 if (error) {
3330 device_printf(adapter->dev,
3331 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3332 __func__, (uintmax_t)size, error);
3333 goto fail_2;
2519 /* First allocate the top level queue structs */
2520 if (!(adapter->tx_queues =
2521 (struct em_tx_queue *) malloc(sizeof(struct em_tx_queue) *
2522 adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2523 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
2524 return(ENOMEM);
3334 }
3335
2525 }
2526
3336 dma->dma_paddr = 0;
3337 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3338 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3339 if (error || dma->dma_paddr == 0) {
3340 device_printf(adapter->dev,
3341 "%s: bus_dmamap_load failed: %d\n",
3342 __func__, error);
3343 goto fail_3;
3344 }
2527 for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) {
2528 /* Set up some basics */
2529 struct tx_ring *txr = &que->txr;
2530 txr->adapter = que->adapter = adapter;
2531 txr->que = que;
2532 que->me = txr->me = i;
3345
2533
3346 return (0);
2534 /* Allocate transmit buffer memory */
2535 if (!(txr->tx_buffers = (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
2536 device_printf(iflib_get_dev(ctx), "failed to allocate tx_buffer memory\n");
2537 error = ENOMEM;
2538 goto fail;
2539 }
3347
2540
3348fail_3:
3349 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3350fail_2:
3351 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3352 bus_dma_tag_destroy(dma->dma_tag);
3353fail_0:
3354 dma->dma_tag = NULL;
3355
2541 /* get the virtual and physical address of the hardware queues */
2542 txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
2543 txr->tx_paddr = paddrs[i*ntxqs];
2544
2545 }
2546
2547 device_printf(iflib_get_dev(ctx), "allocated for %d tx_queues\n", adapter->tx_num_queues);
2548 return (0);
2549 fail:
2550 em_if_queues_free(ctx);
3356 return (error);
3357}
3358
2551 return (error);
2552}
2553
3359static void
3360em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3361{
3362 if (dma->dma_tag == NULL)
3363 return;
3364 if (dma->dma_paddr != 0) {
3365 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3366 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3367 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3368 dma->dma_paddr = 0;
3369 }
3370 if (dma->dma_vaddr != NULL) {
3371 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3372 dma->dma_vaddr = NULL;
3373 }
3374 bus_dma_tag_destroy(dma->dma_tag);
3375 dma->dma_tag = NULL;
3376}
3377
3378
3379/*********************************************************************
3380 *
3381 * Allocate memory for the transmit and receive rings, and then
3382 * the descriptors associated with each, called only once at attach.
3383 *
3384 **********************************************************************/
3385static int
2554static int
3386em_allocate_queues(struct adapter *adapter)
2555em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
3387{
2556{
3388 device_t dev = adapter->dev;
3389 struct tx_ring *txr = NULL;
3390 struct rx_ring *rxr = NULL;
3391 int rsize, tsize, error = E1000_SUCCESS;
3392 int txconf = 0, rxconf = 0;
2557 struct adapter *adapter = iflib_get_softc(ctx);
2558 int error = E1000_SUCCESS;
2559 struct em_rx_queue *que;
2560 int i;
3393
2561
2562 MPASS(adapter->rx_num_queues > 0);
2563 MPASS(adapter->rx_num_queues == nrxqsets);
3394
2564
3395 /* Allocate the TX ring struct memory */
3396 if (!(adapter->tx_rings =
3397 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
3398 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3399 device_printf(dev, "Unable to allocate TX ring memory\n");
2565 /* First allocate the top level queue structs */
2566 if (!(adapter->rx_queues =
2567 (struct em_rx_queue *) malloc(sizeof(struct em_rx_queue) *
2568 adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2569 device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
3400 error = ENOMEM;
2570 error = ENOMEM;
3401 goto fail;
2571 goto fail;
3402 }
3403
2572 }
2573
3404 /* Now allocate the RX */
3405 if (!(adapter->rx_rings =
3406 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
3407 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3408 device_printf(dev, "Unable to allocate RX ring memory\n");
3409 error = ENOMEM;
3410 goto rx_fail;
3411 }
3412
3413 tsize = roundup2(adapter->num_tx_desc *
3414 sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
3415 /*
3416 * Now set up the TX queues, txconf is needed to handle the
3417 * possibility that things fail midcourse and we need to
3418 * undo memory gracefully
3419 */
3420 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2574 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
3421 /* Set up some basics */
2575 /* Set up some basics */
3422 txr = &adapter->tx_rings[i];
3423 txr->adapter = adapter;
3424 txr->me = i;
2576 struct rx_ring *rxr = &que->rxr;
2577 rxr->adapter = que->adapter = adapter;
2578 rxr->que = que;
2579 que->me = rxr->me = i;
3425
2580
3426 /* Initialize the TX lock */
3427 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3428 device_get_nameunit(dev), txr->me);
3429 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
3430
3431 if (em_dma_malloc(adapter, tsize,
3432 &txr->txdma, BUS_DMA_NOWAIT)) {
3433 device_printf(dev,
3434 "Unable to allocate TX Descriptor memory\n");
3435 error = ENOMEM;
3436 goto err_tx_desc;
3437 }
3438 txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
3439 bzero((void *)txr->tx_base, tsize);
3440
3441 if (em_allocate_transmit_buffers(txr)) {
3442 device_printf(dev,
3443 "Critical Failure setting up transmit buffers\n");
3444 error = ENOMEM;
3445 goto err_tx_desc;
3446 }
3447#if __FreeBSD_version >= 800000
3448 /* Allocate a buf ring */
3449 txr->br = buf_ring_alloc(4096, M_DEVBUF,
3450 M_WAITOK, &txr->tx_mtx);
3451#endif
2581 /* get the virtual and physical address of the hardware queues */
2582 rxr->rx_base = (union e1000_rx_desc_extended *)vaddrs[i*nrxqs];
2583 rxr->rx_paddr = paddrs[i*nrxqs];
3452 }
3453
2584 }
2585
3454 /*
3455 * Next the RX queues...
3456 */
3457 rsize = roundup2(adapter->num_rx_desc *
3458 sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
3459 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
3460 rxr = &adapter->rx_rings[i];
3461 rxr->adapter = adapter;
3462 rxr->me = i;
2586 device_printf(iflib_get_dev(ctx), "allocated for %d rx_queues\n", adapter->rx_num_queues);
3463
2587
3464 /* Initialize the RX lock */
3465 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3466		    device_get_nameunit(dev), rxr->me);
3467 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
3468
3469 if (em_dma_malloc(adapter, rsize,
3470 &rxr->rxdma, BUS_DMA_NOWAIT)) {
3471 device_printf(dev,
3472 "Unable to allocate RxDescriptor memory\n");
3473 error = ENOMEM;
3474 goto err_rx_desc;
3475 }
3476 rxr->rx_base = (union e1000_rx_desc_extended *)rxr->rxdma.dma_vaddr;
3477 bzero((void *)rxr->rx_base, rsize);
3478
3479		/* Allocate receive buffers for the ring */
3480 if (em_allocate_receive_buffers(rxr)) {
3481 device_printf(dev,
3482 "Critical Failure setting up receive buffers\n");
3483 error = ENOMEM;
3484 goto err_rx_desc;
3485 }
3486 }
3487
3488 return (0);
2588 return (0);
3489
3490err_rx_desc:
3491 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
3492 em_dma_free(adapter, &rxr->rxdma);
3493err_tx_desc:
3494 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
3495 em_dma_free(adapter, &txr->txdma);
3496 free(adapter->rx_rings, M_DEVBUF);
3497rx_fail:
3498#if __FreeBSD_version >= 800000
3499 buf_ring_free(txr->br, M_DEVBUF);
3500#endif
3501 free(adapter->tx_rings, M_DEVBUF);
3502fail:
2589fail:
2590 em_if_queues_free(ctx);
3503 return (error);
3504}
3505
2591 return (error);
2592}
2593
3506
3507/*********************************************************************
3508 *
3509 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3510 * the information needed to transmit a packet on the wire. This is
3511 * called only once at attach, setup is done every reset.
3512 *
3513 **********************************************************************/
3514static int
3515em_allocate_transmit_buffers(struct tx_ring *txr)
2594static void
2595em_if_queues_free(if_ctx_t ctx)
3516{
2596{
3517 struct adapter *adapter = txr->adapter;
3518 device_t dev = adapter->dev;
3519 struct em_txbuffer *txbuf;
3520 int error, i;
2597 struct adapter *adapter = iflib_get_softc(ctx);
2598 struct em_tx_queue *tx_que = adapter->tx_queues;
2599 struct em_rx_queue *rx_que = adapter->rx_queues;
3521
2600
3522 /*
3523 * Setup DMA descriptor areas.
3524 */
3525 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
3526 1, 0, /* alignment, bounds */
3527 BUS_SPACE_MAXADDR, /* lowaddr */
3528 BUS_SPACE_MAXADDR, /* highaddr */
3529 NULL, NULL, /* filter, filterarg */
3530 EM_TSO_SIZE, /* maxsize */
3531 EM_MAX_SCATTER, /* nsegments */
3532 PAGE_SIZE, /* maxsegsize */
3533 0, /* flags */
3534 NULL, /* lockfunc */
3535 NULL, /* lockfuncarg */
3536 &txr->txtag))) {
3537		device_printf(dev, "Unable to allocate TX DMA tag\n");
3538 goto fail;
3539 }
2601 if (tx_que != NULL) {
2602 for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
2603 struct tx_ring *txr = &tx_que->txr;
2604 if (txr->tx_buffers == NULL)
2605 break;
3540
2606
3541 if (!(txr->tx_buffers =
3542 (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) *
3543 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3544 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3545 error = ENOMEM;
3546 goto fail;
2607 free(txr->tx_buffers, M_DEVBUF);
2608 txr->tx_buffers = NULL;
2609 }
2610 free(adapter->tx_queues, M_DEVBUF);
2611 adapter->tx_queues = NULL;
3547 }
3548
2612 }
2613
3549 /* Create the descriptor buffer dma maps */
3550 txbuf = txr->tx_buffers;
3551 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
3552 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
3553 if (error != 0) {
3554 device_printf(dev, "Unable to create TX DMA map\n");
3555 goto fail;
3556 }
2614 if (rx_que != NULL) {
2615 free(adapter->rx_queues, M_DEVBUF);
2616 adapter->rx_queues = NULL;
3557 }
3558
2617 }
2618
3559 return 0;
3560fail:
3561 /* We free all, it handles case where we are in the middle */
3562 em_free_transmit_structures(adapter);
3563 return (error);
3564}
2619 em_release_hw_control(adapter);
3565
2620
3566/*********************************************************************
3567 *
3568 * Initialize a transmit ring.
3569 *
3570 **********************************************************************/
3571static void
3572em_setup_transmit_ring(struct tx_ring *txr)
3573{
3574 struct adapter *adapter = txr->adapter;
3575 struct em_txbuffer *txbuf;
3576 int i;
3577#ifdef DEV_NETMAP
3578 struct netmap_slot *slot;
3579 struct netmap_adapter *na = netmap_getna(adapter->ifp);
3580#endif /* DEV_NETMAP */
3581
3582 /* Clear the old descriptor contents */
3583 EM_TX_LOCK(txr);
3584#ifdef DEV_NETMAP
3585 slot = netmap_reset(na, NR_TX, txr->me, 0);
3586#endif /* DEV_NETMAP */
3587
3588 bzero((void *)txr->tx_base,
3589 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3590 /* Reset indices */
3591 txr->next_avail_desc = 0;
3592 txr->next_to_clean = 0;
3593
3594 /* Free any existing tx buffers. */
3595 txbuf = txr->tx_buffers;
3596 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
3597 if (txbuf->m_head != NULL) {
3598 bus_dmamap_sync(txr->txtag, txbuf->map,
3599 BUS_DMASYNC_POSTWRITE);
3600 bus_dmamap_unload(txr->txtag, txbuf->map);
3601 m_freem(txbuf->m_head);
3602 txbuf->m_head = NULL;
3603 }
3604#ifdef DEV_NETMAP
3605 if (slot) {
3606 int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
3607 uint64_t paddr;
3608 void *addr;
3609
3610 addr = PNMB(na, slot + si, &paddr);
3611 txr->tx_base[i].buffer_addr = htole64(paddr);
3612 /* reload the map for netmap mode */
3613 netmap_load_map(na, txr->txtag, txbuf->map, addr);
3614 }
3615#endif /* DEV_NETMAP */
3616
3617 /* clear the watch index */
3618 txbuf->next_eop = -1;
3619 }
3620
3621 /* Set number of descriptors available */
3622 txr->tx_avail = adapter->num_tx_desc;
3623 txr->busy = EM_TX_IDLE;
3624
3625 /* Clear checksum offload context. */
3626 txr->last_hw_offload = 0;
3627 txr->last_hw_ipcss = 0;
3628 txr->last_hw_ipcso = 0;
3629 txr->last_hw_tucss = 0;
3630 txr->last_hw_tucso = 0;
3631
3632 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3633 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3634 EM_TX_UNLOCK(txr);
2621 if (adapter->mta != NULL) {
2622 free(adapter->mta, M_DEVBUF);
2623 }
3635}
3636
3637/*********************************************************************
3638 *
2624}
2625
2626/*********************************************************************
2627 *
3639 * Initialize all transmit rings.
3640 *
3641 **********************************************************************/
3642static void
3643em_setup_transmit_structures(struct adapter *adapter)
3644{
3645 struct tx_ring *txr = adapter->tx_rings;
3646
3647 for (int i = 0; i < adapter->num_queues; i++, txr++)
3648 em_setup_transmit_ring(txr);
3649
3650 return;
3651}
3652
3653/*********************************************************************
3654 *
3655 * Enable transmit unit.
3656 *
3657 **********************************************************************/
3658static void
2628 * Enable transmit unit.
2629 *
2630 **********************************************************************/
2631static void
3659em_initialize_transmit_unit(struct adapter *adapter)
2632em_initialize_transmit_unit(if_ctx_t ctx)
3660{
2633{
3661 struct tx_ring *txr = adapter->tx_rings;
2634 struct adapter *adapter = iflib_get_softc(ctx);
2635 if_softc_ctx_t scctx = adapter->shared;
2636 struct em_tx_queue *que;
2637 struct tx_ring *txr;
3662 struct e1000_hw *hw = &adapter->hw;
2638 struct e1000_hw *hw = &adapter->hw;
3663 u32 tctl, txdctl = 0, tarc, tipg = 0;
2639 u32 tctl, txdctl = 0, tarc, tipg = 0;
3664
3665 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3666
2640
2641 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2642
3667 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3668 u64 bus_addr = txr->txdma.dma_paddr;
2643 for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
2644 u64 bus_addr;
2645 caddr_t offp, endp;
2646
2647 que = &adapter->tx_queues[i];
2648 txr = &que->txr;
2649 bus_addr = txr->tx_paddr;
2650
2651		/* Enable all queues */
2652 em_init_tx_ring(que);
2653
2654 /* Clear checksum offload context. */
2655 offp = (caddr_t)&txr->csum_flags;
2656 endp = (caddr_t)(txr + 1);
2657 bzero(offp, endp - offp);
2658
3669 /* Base and Len of TX Ring */
3670 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2659 /* Base and Len of TX Ring */
2660 E1000_WRITE_REG(hw, E1000_TDLEN(i),
3671 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2661 scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc));
3672 E1000_WRITE_REG(hw, E1000_TDBAH(i),
3673 (u32)(bus_addr >> 32));
3674 E1000_WRITE_REG(hw, E1000_TDBAL(i),
3675 (u32)bus_addr);
3676 /* Init the HEAD/TAIL indices */
3677 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
3678 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
3679
3680 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3681 E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
3682 E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
3683
2662 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2663 (u32)(bus_addr >> 32));
2664 E1000_WRITE_REG(hw, E1000_TDBAL(i),
2665 (u32)bus_addr);
2666 /* Init the HEAD/TAIL indices */
2667 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2668 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2669
2670 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2671 E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
2672 E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
2673
3684 txr->busy = EM_TX_IDLE;
3685 txdctl = 0; /* clear txdctl */
3686 txdctl |= 0x1f; /* PTHRESH */
3687 txdctl |= 1 << 8; /* HTHRESH */
3688		txdctl |= 1 << 16; /* WTHRESH */
3689 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
3690 txdctl |= E1000_TXDCTL_GRAN;
3691 txdctl |= 1 << 25; /* LWTHRESH */
3692
3693 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
3694 }
3695
3696 /* Set the default values for the Tx Inter Packet Gap timer */
3697 switch (adapter->hw.mac.type) {
3698 case e1000_80003es2lan:
3699 tipg = DEFAULT_82543_TIPG_IPGR1;
3700 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3701 E1000_TIPG_IPGR2_SHIFT;
3702 break;
2674 txdctl = 0; /* clear txdctl */
2675 txdctl |= 0x1f; /* PTHRESH */
2676 txdctl |= 1 << 8; /* HTHRESH */
2677		txdctl |= 1 << 16; /* WTHRESH */
2678 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
2679 txdctl |= E1000_TXDCTL_GRAN;
2680 txdctl |= 1 << 25; /* LWTHRESH */
2681
2682 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2683 }
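	/*
	 * Editor's arithmetic, assuming E1000_TXDCTL_GRAN is the
	 * descriptor-granularity bit (1 << 24): the value assembled above is
	 *
	 *	0x1f | 1 << 8 | 1 << 16 | 1 << 22 | 1 << 24 | 1 << 25
	 *	    == 0x0341011f
	 *
	 * i.e. PTHRESH = 31, HTHRESH = 1, WTHRESH = 1, thresholds counted
	 * in descriptors, LWTHRESH = 1.
	 */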
2684
2685 /* Set the default values for the Tx Inter Packet Gap timer */
2686 switch (adapter->hw.mac.type) {
2687 case e1000_80003es2lan:
2688 tipg = DEFAULT_82543_TIPG_IPGR1;
2689 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2690 E1000_TIPG_IPGR2_SHIFT;
2691 break;
2692 case e1000_82542:
2693 tipg = DEFAULT_82542_TIPG_IPGT;
2694 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2695 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2696 break;
3703 default:
3704 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3705 (adapter->hw.phy.media_type ==
3706 e1000_media_type_internal_serdes))
3707 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3708 else
3709 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3710 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;

--- 18 unchanged lines hidden ---

3729 tarc |= 1;
3730 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3731 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3732 tarc |= 1;
3733 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3734 } else if (adapter->hw.mac.type == e1000_82574) {
3735 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3736 tarc |= TARC_ERRATA_BIT;
2697 default:
2698 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
2699 (adapter->hw.phy.media_type ==
2700 e1000_media_type_internal_serdes))
2701 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2702 else
2703 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2704 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;

--- 18 unchanged lines hidden ---

2723 tarc |= 1;
2724 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2725 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
2726 tarc |= 1;
2727 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
2728 } else if (adapter->hw.mac.type == e1000_82574) {
2729 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2730 tarc |= TARC_ERRATA_BIT;
3737		if (adapter->num_queues > 1) {
2731		if (adapter->tx_num_queues > 1) {
3738 tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX);
3739 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3740 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3741 } else
3742 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3743 }
3744
2732 tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX);
2733 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2734 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
2735 } else
2736 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2737 }
2738
3745 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
3746 if (adapter->tx_int_delay.value > 0)
3747 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3748
3749 /* Program the Transmit Control Register */
3750 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3751 tctl &= ~E1000_TCTL_CT;
3752 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3753 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

--- 10 unchanged lines hidden ---

3764 reg |= E1000_RCTL_RDMTS_HEX;
3765 E1000_WRITE_REG(hw, E1000_IOSFPC, reg);
3766 reg = E1000_READ_REG(hw, E1000_TARC(0));
3767 reg |= E1000_TARC0_CB_MULTIQ_3_REQ;
3768 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3769 }
3770}
3771
2739 if (adapter->tx_int_delay.value > 0)
2740 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2741
2742 /* Program the Transmit Control Register */
2743 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2744 tctl &= ~E1000_TCTL_CT;
2745 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2746 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

--- 10 unchanged lines hidden ---

2757 reg |= E1000_RCTL_RDMTS_HEX;
2758 E1000_WRITE_REG(hw, E1000_IOSFPC, reg);
2759 reg = E1000_READ_REG(hw, E1000_TARC(0));
2760 reg |= E1000_TARC0_CB_MULTIQ_3_REQ;
2761 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
2762 }
2763}
2764
3772
3773/*********************************************************************
3774 *
2765/*********************************************************************
2766 *
3775 * Free all transmit rings.
3776 *
3777 **********************************************************************/
3778static void
3779em_free_transmit_structures(struct adapter *adapter)
3780{
3781 struct tx_ring *txr = adapter->tx_rings;
3782
3783 for (int i = 0; i < adapter->num_queues; i++, txr++) {
3784 EM_TX_LOCK(txr);
3785 em_free_transmit_buffers(txr);
3786 em_dma_free(adapter, &txr->txdma);
3787 EM_TX_UNLOCK(txr);
3788 EM_TX_LOCK_DESTROY(txr);
3789 }
3790
3791 free(adapter->tx_rings, M_DEVBUF);
3792}
3793
3794/*********************************************************************
3795 *
3796 * Free transmit ring related data structures.
3797 *
3798 **********************************************************************/
3799static void
3800em_free_transmit_buffers(struct tx_ring *txr)
3801{
3802 struct adapter *adapter = txr->adapter;
3803 struct em_txbuffer *txbuf;
3804
3805 INIT_DEBUGOUT("free_transmit_ring: begin");
3806
3807 if (txr->tx_buffers == NULL)
3808 return;
3809
3810 for (int i = 0; i < adapter->num_tx_desc; i++) {
3811 txbuf = &txr->tx_buffers[i];
3812 if (txbuf->m_head != NULL) {
3813 bus_dmamap_sync(txr->txtag, txbuf->map,
3814 BUS_DMASYNC_POSTWRITE);
3815 bus_dmamap_unload(txr->txtag,
3816 txbuf->map);
3817 m_freem(txbuf->m_head);
3818 txbuf->m_head = NULL;
3819 if (txbuf->map != NULL) {
3820 bus_dmamap_destroy(txr->txtag,
3821 txbuf->map);
3822 txbuf->map = NULL;
3823 }
3824 } else if (txbuf->map != NULL) {
3825 bus_dmamap_unload(txr->txtag,
3826 txbuf->map);
3827 bus_dmamap_destroy(txr->txtag,
3828 txbuf->map);
3829 txbuf->map = NULL;
3830 }
3831 }
3832#if __FreeBSD_version >= 800000
3833 if (txr->br != NULL)
3834 buf_ring_free(txr->br, M_DEVBUF);
3835#endif
3836 if (txr->tx_buffers != NULL) {
3837 free(txr->tx_buffers, M_DEVBUF);
3838 txr->tx_buffers = NULL;
3839 }
3840 if (txr->txtag != NULL) {
3841 bus_dma_tag_destroy(txr->txtag);
3842 txr->txtag = NULL;
3843 }
3844 return;
3845}
3846
3847
3848/*********************************************************************
3849 * The offload context is protocol specific (TCP/UDP) and thus
3850 * only needs to be set when the protocol changes. The occasion
3851 * of a context change can be a performance detriment, and
3852 * might be better just disabled. The reason arises in the way
3853 * in which the controller supports pipelined requests from the
3854 * Tx data DMA. Up to four requests can be pipelined, and they may
3855 * belong to the same packet or to multiple packets. However all
3856 * requests for one packet are issued before a request is issued
3857 * for a subsequent packet and if a request for the next packet
3858 * requires a context change, that request will be stalled
3859 * until the previous request completes. This means setting up
3860 * a new context effectively disables pipelined Tx data DMA which
3861 * in turn greatly slows down performance when sending small-sized
3862 * frames.
3863 **********************************************************************/
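/*
 * Editor's sketch of the mitigation used below (not driver code): cache the
 * parameters of the last context descriptor written and skip emitting a new
 * one when they match, e.g.
 *
 *	if (txr->last_hw_offload == offload &&
 *	    txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso)
 *		return;		// previously programmed context still valid
 *	// ...otherwise fall through and write a fresh context descriptor
 */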
3864static void
3865em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
3866 struct ip *ip, u32 *txd_upper, u32 *txd_lower)
3867{
3868 struct adapter *adapter = txr->adapter;
3869 struct e1000_context_desc *TXD = NULL;
3870 struct em_txbuffer *tx_buffer;
3871 int cur, hdr_len;
3872 u32 cmd = 0;
3873 u16 offload = 0;
3874 u8 ipcso, ipcss, tucso, tucss;
3875
3876 ipcss = ipcso = tucss = tucso = 0;
3877 hdr_len = ip_off + (ip->ip_hl << 2);
3878 cur = txr->next_avail_desc;
3879
3880 /* Setup of IP header checksum. */
3881 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3882 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3883 offload |= CSUM_IP;
3884 ipcss = ip_off;
3885 ipcso = ip_off + offsetof(struct ip, ip_sum);
3886 /*
3887 * Start offset for header checksum calculation.
3888 * End offset for header checksum calculation.
3889 * Offset of place to put the checksum.
3890 */
3891 TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3892 TXD->lower_setup.ip_fields.ipcss = ipcss;
3893 TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len);
3894 TXD->lower_setup.ip_fields.ipcso = ipcso;
3895 cmd |= E1000_TXD_CMD_IP;
3896 }
3897
3898 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3899 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3900 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3901 offload |= CSUM_TCP;
3902 tucss = hdr_len;
3903 tucso = hdr_len + offsetof(struct tcphdr, th_sum);
3904 /*
3905 * The 82574L can only remember the *last* context used
3906		 * regardless of the queue it was used for. We cannot reuse
3907 * contexts on this hardware platform and must generate a new
3908 * context every time. 82574L hardware spec, section 7.2.6,
3909 * second note.
3910 */
3911 if (adapter->num_queues < 2) {
3912 /*
3913			 * Setting up a new checksum offload context for every
3914			 * frame takes a lot of processing time for the hardware.
3915			 * This also reduces performance a lot for small-sized
3916			 * frames, so avoid it if the driver can use a previously
3917			 * configured checksum offload context.
3918 */
3919 if (txr->last_hw_offload == offload) {
3920 if (offload & CSUM_IP) {
3921 if (txr->last_hw_ipcss == ipcss &&
3922 txr->last_hw_ipcso == ipcso &&
3923 txr->last_hw_tucss == tucss &&
3924 txr->last_hw_tucso == tucso)
3925 return;
3926 } else {
3927 if (txr->last_hw_tucss == tucss &&
3928 txr->last_hw_tucso == tucso)
3929 return;
3930 }
3931 }
3932 txr->last_hw_offload = offload;
3933 txr->last_hw_tucss = tucss;
3934 txr->last_hw_tucso = tucso;
3935 }
3936 /*
3937 * Start offset for payload checksum calculation.
3938 * End offset for payload checksum calculation.
3939 * Offset of place to put the checksum.
3940 */
3941 TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3942 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3943 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3944 TXD->upper_setup.tcp_fields.tucso = tucso;
3945 cmd |= E1000_TXD_CMD_TCP;
3946 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3947 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3948 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3949 tucss = hdr_len;
3950 tucso = hdr_len + offsetof(struct udphdr, uh_sum);
3951 /*
3952 * The 82574L can only remember the *last* context used
3953		 * regardless of the queue it was used for. We cannot reuse
3954 * contexts on this hardware platform and must generate a new
3955 * context every time. 82574L hardware spec, section 7.2.6,
3956 * second note.
3957 */
3958 if (adapter->num_queues < 2) {
3959 /*
3960			 * Setting up a new checksum offload context for every
3961			 * frame takes a lot of processing time for the hardware.
3962			 * This also reduces performance a lot for small-sized
3963			 * frames, so avoid it if the driver can use a previously
3964			 * configured checksum offload context.
3965 */
3966 if (txr->last_hw_offload == offload) {
3967 if (offload & CSUM_IP) {
3968 if (txr->last_hw_ipcss == ipcss &&
3969 txr->last_hw_ipcso == ipcso &&
3970 txr->last_hw_tucss == tucss &&
3971 txr->last_hw_tucso == tucso)
3972 return;
3973 } else {
3974 if (txr->last_hw_tucss == tucss &&
3975 txr->last_hw_tucso == tucso)
3976 return;
3977 }
3978 }
3979 txr->last_hw_offload = offload;
3980 txr->last_hw_tucss = tucss;
3981 txr->last_hw_tucso = tucso;
3982 }
3983 /*
3984 * Start offset for header checksum calculation.
3985 * End offset for header checksum calculation.
3986 * Offset of place to put the checksum.
3987 */
3988 TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
3989 TXD->upper_setup.tcp_fields.tucss = tucss;
3990 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3991 TXD->upper_setup.tcp_fields.tucso = tucso;
3992 }
3993
3994 if (offload & CSUM_IP) {
3995 txr->last_hw_ipcss = ipcss;
3996 txr->last_hw_ipcso = ipcso;
3997 }
3998
3999 TXD->tcp_seg_setup.data = htole32(0);
4000 TXD->cmd_and_length =
4001 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
4002 tx_buffer = &txr->tx_buffers[cur];
4003 tx_buffer->m_head = NULL;
4004 tx_buffer->next_eop = -1;
4005
4006 if (++cur == adapter->num_tx_desc)
4007 cur = 0;
4008
4009 txr->tx_avail--;
4010 txr->next_avail_desc = cur;
4011}
4012
4013
4014/**********************************************************************
4015 *
4016 * Setup work for hardware segmentation offload (TSO)
4017 *
4018 **********************************************************************/
4019static void
4020em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off,
4021 struct ip *ip, struct tcphdr *tp, u32 *txd_upper, u32 *txd_lower)
4022{
4023 struct adapter *adapter = txr->adapter;
4024 struct e1000_context_desc *TXD;
4025 struct em_txbuffer *tx_buffer;
4026 int cur, hdr_len;
4027
4028 /*
4029	 * In theory we can use the same TSO context if and only if the
4030	 * frame is the same type (IP/TCP) and has the same MSS. However,
4031	 * checking whether a frame has the same IP/TCP structure is a
4032	 * hard thing to do, so just ignore that and always re-establish a
4033	 * new TSO context.
4034 */
4035 hdr_len = ip_off + (ip->ip_hl << 2) + (tp->th_off << 2);
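	/*
	 * Editor's example: for a plain Ethernet + IPv4 + TCP frame with no
	 * options, hdr_len = 14 + (5 << 2) + (5 << 2) = 54 bytes.
	 */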
4036 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
4037 E1000_TXD_DTYP_D | /* Data descr type */
4038 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
4039
4040 /* IP and/or TCP header checksum calculation and insertion. */
4041 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
4042
4043 cur = txr->next_avail_desc;
4044 tx_buffer = &txr->tx_buffers[cur];
4045 TXD = (struct e1000_context_desc *) &txr->tx_base[cur];
4046
4047 /*
4048 * Start offset for header checksum calculation.
4049 * End offset for header checksum calculation.
4050	 * Offset of place to put the checksum.
4051 */
4052 TXD->lower_setup.ip_fields.ipcss = ip_off;
4053 TXD->lower_setup.ip_fields.ipcse =
4054 htole16(ip_off + (ip->ip_hl << 2) - 1);
4055 TXD->lower_setup.ip_fields.ipcso = ip_off + offsetof(struct ip, ip_sum);
4056 /*
4057 * Start offset for payload checksum calculation.
4058 * End offset for payload checksum calculation.
4059 * Offset of place to put the checksum.
4060 */
4061 TXD->upper_setup.tcp_fields.tucss = ip_off + (ip->ip_hl << 2);
4062 TXD->upper_setup.tcp_fields.tucse = 0;
4063 TXD->upper_setup.tcp_fields.tucso =
4064 ip_off + (ip->ip_hl << 2) + offsetof(struct tcphdr, th_sum);
4065 /*
4066 * Payload size per packet w/o any headers.
4067 * Length of all headers up to payload.
4068 */
4069 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
4070 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
4071
4072 TXD->cmd_and_length = htole32(adapter->txd_cmd |
4073 E1000_TXD_CMD_DEXT | /* Extended descr */
4074 E1000_TXD_CMD_TSE | /* TSE context */
4075 E1000_TXD_CMD_IP | /* Do IP csum */
4076 E1000_TXD_CMD_TCP | /* Do TCP checksum */
4077 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
4078
4079 tx_buffer->m_head = NULL;
4080 tx_buffer->next_eop = -1;
4081
4082 if (++cur == adapter->num_tx_desc)
4083 cur = 0;
4084
4085 txr->tx_avail--;
4086 txr->next_avail_desc = cur;
4087 txr->tx_tso = TRUE;
4088}
4089
4090
4091/**********************************************************************
4092 *
4093 * Examine each tx_buffer in the used queue. If the hardware is done
4094 * processing the packet then free associated resources. The
4095 * tx_buffer is put back on the free queue.
4096 *
4097 **********************************************************************/
4098static void
4099em_txeof(struct tx_ring *txr)
4100{
4101 struct adapter *adapter = txr->adapter;
4102 int first, last, done, processed;
4103 struct em_txbuffer *tx_buffer;
4104 struct e1000_tx_desc *tx_desc, *eop_desc;
4105 if_t ifp = adapter->ifp;
4106
4107 EM_TX_LOCK_ASSERT(txr);
4108#ifdef DEV_NETMAP
4109 if (netmap_tx_irq(ifp, txr->me))
4110 return;
4111#endif /* DEV_NETMAP */
4112
4113 /* No work, make sure hang detection is disabled */
4114 if (txr->tx_avail == adapter->num_tx_desc) {
4115 txr->busy = EM_TX_IDLE;
4116 return;
4117 }
4118
4119 processed = 0;
4120 first = txr->next_to_clean;
4121 tx_desc = &txr->tx_base[first];
4122 tx_buffer = &txr->tx_buffers[first];
4123 last = tx_buffer->next_eop;
4124 eop_desc = &txr->tx_base[last];
4125
4126 /*
4127 * What this does is get the index of the
4128 * first descriptor AFTER the EOP of the
4129	 * first packet, so that we can do the
4130	 * simple comparison in the inner while loop.
4131 */
4132 if (++last == adapter->num_tx_desc)
4133 last = 0;
4134 done = last;
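	/*
	 * Editor's example: with num_tx_desc == 256, first == 10 and
	 * next_eop == 13, 'last' advances to 14, so 'done' == 14 and the
	 * inner loop below cleans descriptors 10 through 13 inclusive.
	 */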
4135
4136 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
4137 BUS_DMASYNC_POSTREAD);
4138
4139 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
4140 /* We clean the range of the packet */
4141 while (first != done) {
4142 tx_desc->upper.data = 0;
4143 tx_desc->lower.data = 0;
4144 tx_desc->buffer_addr = 0;
4145 ++txr->tx_avail;
4146 ++processed;
4147
4148 if (tx_buffer->m_head) {
4149 bus_dmamap_sync(txr->txtag,
4150 tx_buffer->map,
4151 BUS_DMASYNC_POSTWRITE);
4152 bus_dmamap_unload(txr->txtag,
4153 tx_buffer->map);
4154 m_freem(tx_buffer->m_head);
4155 tx_buffer->m_head = NULL;
4156 }
4157 tx_buffer->next_eop = -1;
4158
4159 if (++first == adapter->num_tx_desc)
4160 first = 0;
4161
4162 tx_buffer = &txr->tx_buffers[first];
4163 tx_desc = &txr->tx_base[first];
4164 }
4165 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
4166 /* See if we can continue to the next packet */
4167 last = tx_buffer->next_eop;
4168 if (last != -1) {
4169 eop_desc = &txr->tx_base[last];
4170 /* Get new done point */
4171 if (++last == adapter->num_tx_desc) last = 0;
4172 done = last;
4173 } else
4174 break;
4175 }
4176 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
4177 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4178
4179 txr->next_to_clean = first;
4180
4181 /*
4182 ** Hang detection: we know there's work outstanding
4183	** or the early return at entry would have been taken, so no
4184 ** descriptor processed here indicates a potential hang.
4185 ** The local timer will examine this and do a reset if needed.
4186 */
4187 if (processed == 0) {
4188 if (txr->busy != EM_TX_HUNG)
4189 ++txr->busy;
4190 } else /* At least one descriptor was cleaned */
4191 txr->busy = EM_TX_BUSY; /* note this clears HUNG */
4192
4193 /*
4194 * If we have a minimum free, clear IFF_DRV_OACTIVE
4195 * to tell the stack that it is OK to send packets.
4196 * Notice that all writes of OACTIVE happen under the
4197 * TX lock which, with a single queue, guarantees
4198 * sanity.
4199 */
4200 if (txr->tx_avail >= EM_MAX_SCATTER) {
4201 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
4202 }
4203
4204 /* Disable hang detection if all clean */
4205 if (txr->tx_avail == adapter->num_tx_desc)
4206 txr->busy = EM_TX_IDLE;
4207}
4208
4209/*********************************************************************
4210 *
4211 * Refresh RX descriptor mbufs from system mbuf buffer pool.
4212 *
4213 **********************************************************************/
4214static void
4215em_refresh_mbufs(struct rx_ring *rxr, int limit)
4216{
4217 struct adapter *adapter = rxr->adapter;
4218 struct mbuf *m;
4219 bus_dma_segment_t segs;
4220 struct em_rxbuffer *rxbuf;
4221 int i, j, error, nsegs;
4222 bool cleaned = FALSE;
4223
4224 i = j = rxr->next_to_refresh;
4225 /*
4226 ** Get one descriptor beyond
4227 ** our work mark to control
4228 ** the loop.
4229 */
4230 if (++j == adapter->num_rx_desc)
4231 j = 0;
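	/*
	 * Editor's example: with num_rx_desc == 256 and next_to_refresh ==
	 * 255, i starts at 255 while j wraps to 0; j always runs one
	 * descriptor ahead of i, so the loop below stops short of 'limit'
	 * and next_to_refresh can never pass it.
	 */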
4232
4233 while (j != limit) {
4234 rxbuf = &rxr->rx_buffers[i];
4235 if (rxbuf->m_head == NULL) {
4236 m = m_getjcl(M_NOWAIT, MT_DATA,
4237 M_PKTHDR, adapter->rx_mbuf_sz);
4238 /*
4239 ** If we have a temporary resource shortage
4240 ** that causes a failure, just abort refresh
4241		** for now; we will return to this point when
4242 ** reinvoked from em_rxeof.
4243 */
4244 if (m == NULL)
4245 goto update;
4246 } else
4247 m = rxbuf->m_head;
4248
4249 m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz;
4250 m->m_flags |= M_PKTHDR;
4251 m->m_data = m->m_ext.ext_buf;
4252
4253 /* Use bus_dma machinery to setup the memory mapping */
4254 error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
4255 m, &segs, &nsegs, BUS_DMA_NOWAIT);
4256 if (error != 0) {
4257 printf("Refresh mbufs: hdr dmamap load"
4258 " failure - %d\n", error);
4259 m_free(m);
4260 rxbuf->m_head = NULL;
4261 goto update;
4262 }
4263 rxbuf->m_head = m;
4264 rxbuf->paddr = segs.ds_addr;
4265 bus_dmamap_sync(rxr->rxtag,
4266 rxbuf->map, BUS_DMASYNC_PREREAD);
4267 em_setup_rxdesc(&rxr->rx_base[i], rxbuf);
4268 cleaned = TRUE;
4269
4270		i = j; /* Next is precalculated for us */
4271 rxr->next_to_refresh = i;
4272 /* Calculate next controlling index */
4273 if (++j == adapter->num_rx_desc)
4274 j = 0;
4275 }
4276update:
4277 /*
4278	** Update the tail pointer only if we refreshed,
4279	** and only as far as we have refreshed.
4280 */
4281 if (cleaned)
4282 E1000_WRITE_REG(&adapter->hw,
4283 E1000_RDT(rxr->me), rxr->next_to_refresh);
4284
4285 return;
4286}
4287
4288
4289/*********************************************************************
4290 *
4291 * Allocate memory for rx_buffer structures. Since we use one
4292 * rx_buffer per received packet, the maximum number of rx_buffers
4293 * that we'll need is equal to the number of receive descriptors
4294 * that we've allocated.
4295 *
4296 **********************************************************************/
4297static int
4298em_allocate_receive_buffers(struct rx_ring *rxr)
4299{
4300 struct adapter *adapter = rxr->adapter;
4301 device_t dev = adapter->dev;
4302 struct em_rxbuffer *rxbuf;
4303 int error;
4304
4305 rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) *
4306 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4307 if (rxr->rx_buffers == NULL) {
4308 device_printf(dev, "Unable to allocate rx_buffer memory\n");
4309 return (ENOMEM);
4310 }
4311
4312 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4313 1, 0, /* alignment, bounds */
4314 BUS_SPACE_MAXADDR, /* lowaddr */
4315 BUS_SPACE_MAXADDR, /* highaddr */
4316 NULL, NULL, /* filter, filterarg */
4317 MJUM9BYTES, /* maxsize */
4318 1, /* nsegments */
4319 MJUM9BYTES, /* maxsegsize */
4320 0, /* flags */
4321 NULL, /* lockfunc */
4322 NULL, /* lockarg */
4323 &rxr->rxtag);
4324 if (error) {
4325 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4326 __func__, error);
4327 goto fail;
4328 }
4329
4330 rxbuf = rxr->rx_buffers;
4331 for (int i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
4332 rxbuf = &rxr->rx_buffers[i];
4333 error = bus_dmamap_create(rxr->rxtag, 0, &rxbuf->map);
4334 if (error) {
4335 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4336 __func__, error);
4337 goto fail;
4338 }
4339 }
4340
4341 return (0);
4342
4343fail:
4344 em_free_receive_structures(adapter);
4345 return (error);
4346}
4347
4348
4349/*********************************************************************
4350 *
4351 * Initialize a receive ring and its buffers.
4352 *
4353 **********************************************************************/
4354static int
4355em_setup_receive_ring(struct rx_ring *rxr)
4356{
4357 struct adapter *adapter = rxr->adapter;
4358 struct em_rxbuffer *rxbuf;
4359 bus_dma_segment_t seg[1];
4360 int rsize, nsegs, error = 0;
4361#ifdef DEV_NETMAP
4362 struct netmap_slot *slot;
4363 struct netmap_adapter *na = netmap_getna(adapter->ifp);
4364#endif
4365
4366
4367 /* Clear the ring contents */
4368 EM_RX_LOCK(rxr);
4369 rsize = roundup2(adapter->num_rx_desc *
4370 sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
4371 bzero((void *)rxr->rx_base, rsize);
4372#ifdef DEV_NETMAP
4373 slot = netmap_reset(na, NR_RX, rxr->me, 0);
4374#endif
4375
4376 /*
4377 ** Free current RX buffer structs and their mbufs
4378 */
4379 for (int i = 0; i < adapter->num_rx_desc; i++) {
4380 rxbuf = &rxr->rx_buffers[i];
4381 if (rxbuf->m_head != NULL) {
4382 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4383 BUS_DMASYNC_POSTREAD);
4384 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4385 m_freem(rxbuf->m_head);
4386 rxbuf->m_head = NULL; /* mark as freed */
4387 }
4388 }
4389
4390 /* Now replenish the mbufs */
4391 for (int j = 0; j != adapter->num_rx_desc; ++j) {
4392 rxbuf = &rxr->rx_buffers[j];
4393#ifdef DEV_NETMAP
4394 if (slot) {
4395 int si = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
4396 uint64_t paddr;
4397 void *addr;
4398
4399 addr = PNMB(na, slot + si, &paddr);
4400 netmap_load_map(na, rxr->rxtag, rxbuf->map, addr);
4401 rxbuf->paddr = paddr;
4402 em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
4403 continue;
4404 }
4405#endif /* DEV_NETMAP */
4406 rxbuf->m_head = m_getjcl(M_NOWAIT, MT_DATA,
4407 M_PKTHDR, adapter->rx_mbuf_sz);
4408 if (rxbuf->m_head == NULL) {
4409 error = ENOBUFS;
4410 goto fail;
4411 }
4412 rxbuf->m_head->m_len = adapter->rx_mbuf_sz;
4413 rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */
4414 rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz;
4415
4416 /* Get the memory mapping */
4417 error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
4418 rxbuf->map, rxbuf->m_head, seg,
4419 &nsegs, BUS_DMA_NOWAIT);
4420 if (error != 0) {
4421 m_freem(rxbuf->m_head);
4422 rxbuf->m_head = NULL;
4423 goto fail;
4424 }
4425 bus_dmamap_sync(rxr->rxtag,
4426 rxbuf->map, BUS_DMASYNC_PREREAD);
4427
4428 rxbuf->paddr = seg[0].ds_addr;
4429 em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
4430 }
4431 rxr->next_to_check = 0;
4432 rxr->next_to_refresh = 0;
4433 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4434 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4435
4436fail:
4437 EM_RX_UNLOCK(rxr);
4438 return (error);
4439}
4440
4441/*********************************************************************
4442 *
4443 * Initialize all receive rings.
4444 *
4445 **********************************************************************/
4446static int
4447em_setup_receive_structures(struct adapter *adapter)
4448{
4449 struct rx_ring *rxr = adapter->rx_rings;
4450 int q;
4451
4452 for (q = 0; q < adapter->num_queues; q++, rxr++)
4453 if (em_setup_receive_ring(rxr))
4454 goto fail;
4455
4456 return (0);
4457fail:
4458 /*
4459	 * Free RX buffers allocated so far; we will only handle
4460	 * the rings that completed, as the failing case will have
4461	 * cleaned up after itself. 'q' failed, so it's the terminus.
4462 */
4463 for (int i = 0; i < q; ++i) {
4464 rxr = &adapter->rx_rings[i];
4465 for (int n = 0; n < adapter->num_rx_desc; n++) {
4466 struct em_rxbuffer *rxbuf;
4467 rxbuf = &rxr->rx_buffers[n];
4468 if (rxbuf->m_head != NULL) {
4469 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4470 BUS_DMASYNC_POSTREAD);
4471 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4472 m_freem(rxbuf->m_head);
4473 rxbuf->m_head = NULL;
4474 }
4475 }
4476 rxr->next_to_check = 0;
4477 rxr->next_to_refresh = 0;
4478 }
4479
4480 return (ENOBUFS);
4481}
4482
4483/*********************************************************************
4484 *
4485 * Free all receive rings.
4486 *
4487 **********************************************************************/
4488static void
4489em_free_receive_structures(struct adapter *adapter)
4490{
4491 struct rx_ring *rxr = adapter->rx_rings;
4492
4493 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4494 em_free_receive_buffers(rxr);
4495 /* Free the ring memory as well */
4496 em_dma_free(adapter, &rxr->rxdma);
4497 EM_RX_LOCK_DESTROY(rxr);
4498 }
4499
4500 free(adapter->rx_rings, M_DEVBUF);
4501}
4502
4503
4504/*********************************************************************
4505 *
4506 * Free receive ring data structures
4507 *
4508 **********************************************************************/
4509static void
4510em_free_receive_buffers(struct rx_ring *rxr)
4511{
4512 struct adapter *adapter = rxr->adapter;
4513 struct em_rxbuffer *rxbuf = NULL;
4514
4515 INIT_DEBUGOUT("free_receive_buffers: begin");
4516
4517 if (rxr->rx_buffers != NULL) {
4518 for (int i = 0; i < adapter->num_rx_desc; i++) {
4519 rxbuf = &rxr->rx_buffers[i];
4520 if (rxbuf->map != NULL) {
4521 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
4522 BUS_DMASYNC_POSTREAD);
4523 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
4524 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
4525 }
4526 if (rxbuf->m_head != NULL) {
4527 m_freem(rxbuf->m_head);
4528 rxbuf->m_head = NULL;
4529 }
4530 }
4531 free(rxr->rx_buffers, M_DEVBUF);
4532 rxr->rx_buffers = NULL;
4533 rxr->next_to_check = 0;
4534 rxr->next_to_refresh = 0;
4535 }
4536
4537 if (rxr->rxtag != NULL) {
4538 bus_dma_tag_destroy(rxr->rxtag);
4539 rxr->rxtag = NULL;
4540 }
4541
4542 return;
4543}
4544
4545
4546/*********************************************************************
4547 *
4548 * Enable receive unit.
4549 *
4550 **********************************************************************/
4551
4552static void
2767 * Enable receive unit.
2768 *
2769 **********************************************************************/
2770
2771static void
4553em_initialize_receive_unit(struct adapter *adapter)
2772em_initialize_receive_unit(if_ctx_t ctx)
4554{
2773{
4555 struct rx_ring *rxr = adapter->rx_rings;
4556 if_t ifp = adapter->ifp;
2774 struct adapter *adapter = iflib_get_softc(ctx);
2775 if_softc_ctx_t scctx = adapter->shared;
2776 struct ifnet *ifp = iflib_get_ifp(ctx);
4557 struct e1000_hw *hw = &adapter->hw;
2777 struct e1000_hw *hw = &adapter->hw;
2778 struct em_rx_queue *que;
2779 int i;
4558 u32 rctl, rxcsum, rfctl;
4559
4560 INIT_DEBUGOUT("em_initialize_receive_units: begin");
4561
4562 /*
4563 * Make sure receives are disabled while setting
4564 * up the descriptor ring
4565 */

--- 16 unchanged lines hidden (view full) ---

4582 rctl |= E1000_RCTL_LPE;
4583 else
4584 rctl &= ~E1000_RCTL_LPE;
4585
4586 /* Strip the CRC */
4587 if (!em_disable_crc_stripping)
4588 rctl |= E1000_RCTL_SECRC;
4589
2780 u32 rctl, rxcsum, rfctl;
2781
2782 INIT_DEBUGOUT("em_initialize_receive_units: begin");
2783
2784 /*
2785 * Make sure receives are disabled while setting
2786 * up the descriptor ring
2787 */

--- 16 unchanged lines hidden ---

2804 rctl |= E1000_RCTL_LPE;
2805 else
2806 rctl &= ~E1000_RCTL_LPE;
2807
2808 /* Strip the CRC */
2809 if (!em_disable_crc_stripping)
2810 rctl |= E1000_RCTL_SECRC;
2811
4590 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4591 adapter->rx_abs_int_delay.value);
2812 if (adapter->hw.mac.type >= e1000_82540) {
2813 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
2814 adapter->rx_abs_int_delay.value);
4592
2815
2816 /*
2817 * Set the interrupt throttling rate. Value is calculated
2818 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
2819 */
2820 E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
2821 }
4593 E1000_WRITE_REG(&adapter->hw, E1000_RDTR,
4594 adapter->rx_int_delay.value);
2822 E1000_WRITE_REG(&adapter->hw, E1000_RDTR,
2823 adapter->rx_int_delay.value);
4595 /*
4596 * Set the interrupt throttling rate. Value is calculated
4597 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4598 */
4599 E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
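	/*
	 * Editor's arithmetic, assuming the driver's customary
	 * MAX_INTS_PER_SEC of 8000: DEFAULT_ITR = 10^9 / (8000 * 256) = 488
	 * units of 256 ns, i.e. roughly 125 us between interrupts.
	 */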
4600
4601 /* Use extended rx descriptor formats */
4602 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
4603 rfctl |= E1000_RFCTL_EXTEN;
4604 /*
4605 ** When using MSIX interrupts we need to throttle
4606 ** using the EITR register (82574 only)
4607 */
4608 if (hw->mac.type == e1000_82574) {
4609 for (int i = 0; i < 4; i++)
4610 E1000_WRITE_REG(hw, E1000_EITR_82574(i),
4611 DEFAULT_ITR);
4612 /* Disable accelerated acknowledge */
4613 rfctl |= E1000_RFCTL_ACK_DIS;
4614 }
4615 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
4616
4617 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2824
2825 /* Use extended rx descriptor formats */
2826 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2827 rfctl |= E1000_RFCTL_EXTEN;
2828 /*
2829 ** When using MSIX interrupts we need to throttle
2830 ** using the EITR register (82574 only)
2831 */
2832 if (hw->mac.type == e1000_82574) {
2833 for (int i = 0; i < 4; i++)
2834 E1000_WRITE_REG(hw, E1000_EITR_82574(i),
2835 DEFAULT_ITR);
2836 /* Disable accelerated acknowledge */
2837 rfctl |= E1000_RFCTL_ACK_DIS;
2838 }
2839 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2840
2841 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
4618 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4619#ifdef EM_MULTIQUEUE
4620 rxcsum |= E1000_RXCSUM_TUOFL |
4621 E1000_RXCSUM_IPOFL |
4622 E1000_RXCSUM_PCSD;
4623#else
4624 rxcsum |= E1000_RXCSUM_TUOFL;
4625#endif
2842 if (if_getcapenable(ifp) & IFCAP_RXCSUM &&
2843 adapter->hw.mac.type >= e1000_82543) {
2844 if (adapter->tx_num_queues > 1) {
2845 if (adapter->hw.mac.type >= igb_mac_min) {
2846 rxcsum |= E1000_RXCSUM_PCSD;
2847 if (hw->mac.type != e1000_82575)
2848 rxcsum |= E1000_RXCSUM_CRCOFL;
2849 } else
2850 rxcsum |= E1000_RXCSUM_TUOFL |
2851 E1000_RXCSUM_IPOFL |
2852 E1000_RXCSUM_PCSD;
2853 } else {
2854 if (adapter->hw.mac.type >= igb_mac_min)
2855 rxcsum |= E1000_RXCSUM_IPPCSE;
2856 else
2857 rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL;
2858 if (adapter->hw.mac.type > e1000_82575)
2859 rxcsum |= E1000_RXCSUM_CRCOFL;
2860 }
4626 } else
4627 rxcsum &= ~E1000_RXCSUM_TUOFL;
4628
4629 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
4630
2861 } else
2862 rxcsum &= ~E1000_RXCSUM_TUOFL;
2863
2864 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2865
4631#ifdef EM_MULTIQUEUE
4632#define RSSKEYLEN 10
4633 if (adapter->num_queues > 1) {
4634 uint8_t rss_key[4 * RSSKEYLEN];
4635 uint32_t reta = 0;
4636 int i;
4637
4638 /*
4639 * Configure RSS key
4640 */
4641 arc4rand(rss_key, sizeof(rss_key), 0);
4642 for (i = 0; i < RSSKEYLEN; ++i) {
4643 uint32_t rssrk = 0;
4644
4645 rssrk = EM_RSSRK_VAL(rss_key, i);
4646 E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk);
4647 }
4648
4649 /*
4650		 * Configure RSS redirect table in the following fashion:
4651 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
4652 */
4653 for (i = 0; i < sizeof(reta); ++i) {
4654 uint32_t q;
4655
4656 q = (i % adapter->num_queues) << 7;
4657 reta |= q << (8 * i);
4658 }
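		/*
		 * Editor's example: with num_queues == 2 the loop above packs
		 * q = 0x00, 0x80, 0x00, 0x80 into successive bytes, giving
		 * reta == 0x80008000; every odd hash bucket is steered to
		 * queue 1 via bit 7 of its byte.
		 */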
4659
4660 for (i = 0; i < 32; ++i) {
4661 E1000_WRITE_REG(hw, E1000_RETA(i), reta);
4662 }
4663
4664 E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q |
4665 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4666 E1000_MRQC_RSS_FIELD_IPV4 |
4667 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX |
4668 E1000_MRQC_RSS_FIELD_IPV6_EX |
4669 E1000_MRQC_RSS_FIELD_IPV6);
2866 if (adapter->rx_num_queues > 1) {
2867 if (adapter->hw.mac.type >= igb_mac_min)
2868 igb_initialize_rss_mapping(adapter);
2869 else
2870 em_initialize_rss_mapping(adapter);
4670 }
2871 }
4671#endif
2872
4672 /*
4673 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4674 ** long latencies are observed, like Lenovo X60. This
4675 ** change eliminates the problem, but since having positive
4676 ** values in RDTR is a known source of problems on other
4677 ** platforms another solution is being sought.
4678 */
4679 if (hw->mac.type == e1000_82573)
4680 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
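 /*
  * Note (added for clarity): RDTR counts in 1.024us units, so the 0x20
  * written above delays receive interrupts by roughly 33us.
  */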
4681
4682 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2883 for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
2884 struct rx_ring *rxr = &que->rxr;
4683 /* Setup the Base and Length of the Rx Descriptor Ring */
4684 u64 bus_addr = rxr->rxdma.dma_paddr;
4685 u32 rdt = adapter->num_rx_desc - 1; /* default */
2886 u64 bus_addr = rxr->rx_paddr;
2887#if 0
2888 u32 rdt = adapter->rx_num_queues -1; /* default */
2889#endif
4686
4687 E1000_WRITE_REG(hw, E1000_RDLEN(i),
4688 adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended));
2892 scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended));
4689 E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
4690 E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
4691 /* Setup the Head and Tail Descriptor Pointers */
4692 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
4693#ifdef DEV_NETMAP
4694 /*
4695 * an init() while a netmap client is active must
4696 * preserve the rx buffers passed to userspace.
4697 */
4698 if (if_getcapenable(ifp) & IFCAP_NETMAP) {
4699 struct netmap_adapter *na = netmap_getna(adapter->ifp);
4700 rdt -= nm_kr_rxspace(&na->rx_rings[i]);
4701 }
4702#endif /* DEV_NETMAP */
4703 E1000_WRITE_REG(hw, E1000_RDT(i), rdt);
2897 E1000_WRITE_REG(hw, E1000_RDT(i), 0);
4704 }
4705
2898 }
2899
2900 /*
2901 * Set PTHRESH for improved jumbo performance
2902 * According to 10.2.5.11 of Intel 82574 Datasheet,
2903 * RXDCTL(1) is written whenever RXDCTL(0) is written.
2904 * Only write to RXDCTL(1) if there is a need for different
2905 * settings.
2906 */
2907
4713 if (((adapter->hw.mac.type == e1000_ich9lan) ||
4714 (adapter->hw.mac.type == e1000_pch2lan) ||
4715 (adapter->hw.mac.type == e1000_ich10lan)) &&
4716 (if_getmtu(ifp) > ETHERMTU)) {
4717 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
4718 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
4719 } else if (adapter->hw.mac.type == e1000_82574) {
4720 for (int i = 0; i < adapter->num_queues; i++) {
2915 for (int i = 0; i < adapter->rx_num_queues; i++) {
4721 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
4722
4723 rxdctl |= 0x20; /* PTHRESH */
4724 rxdctl |= 4 << 8; /* HTHRESH */
4725 rxdctl |= 4 << 16;/* WTHRESH */
4726 rxdctl |= 1 << 24; /* Switch to granularity */
4727 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
4728 }
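 /*
  * Worked example (added for clarity): the ORed constants above pack as
  * rxdctl |= 0x01040420 -- PTHRESH = 32, HTHRESH = 4, WTHRESH = 4, with
  * bit 24 switching the thresholds to descriptor granularity.
  */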
2923 } else if (adapter->hw.mac.type >= igb_mac_min) {
2924 u32 psize, srrctl = 0;
2925
2926 if (ifp->if_mtu > ETHERMTU) {
2927 rctl |= E1000_RCTL_LPE;
2928
2929 /* Set maximum packet len */
2930 psize = scctx->isc_max_frame_size;
2931 if (psize <= 4096) {
2932 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2933 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
2934 } else if (psize > 4096) {
2935 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2936 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
2937 }
2938
2939 /* are we on a vlan? */
2940 if (ifp->if_vlantrunk != NULL)
2941 psize += VLAN_TAG_SIZE;
2942 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
2943 } else {
2944 rctl &= ~E1000_RCTL_LPE;
2945 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2946 rctl |= E1000_RCTL_SZ_2048;
2947 }
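  /*
   * Worked example (added for clarity; assumes the usual
   * E1000_SRRCTL_BSIZEPKT_SHIFT of 10): 4096 >> 10 == 4 programs
   * SRRCTL.BSIZEPKT in 1KB units, and the matching RCTL_SZ_4096 and
   * BSEX bits keep the legacy size field in agreement.
   */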
2948
2949 /*
2950 * If TX flow control is disabled and there's >1 queue defined,
2951 * enable DROP.
2952 *
2953 * This drops frames rather than hanging the RX MAC for all queues.
2954 */
2955 if ((adapter->rx_num_queues > 1) &&
2956 (adapter->fc == e1000_fc_none ||
2957 adapter->fc == e1000_fc_rx_pause)) {
2958 srrctl |= E1000_SRRCTL_DROP_EN;
2959 }
2960 /* Setup the Base and Length of the Rx Descriptor Rings */
2961 for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
2962 struct rx_ring *rxr = &que->rxr;
2963 u64 bus_addr = rxr->rx_paddr;
2964 u32 rxdctl;
2965
2966#ifdef notyet
2967 /* Configure for header split? -- ignore for now */
2968 rxr->hdr_split = igb_header_split;
2969#else
2970 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2971#endif
2972
2973
2974 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2975 scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc));
2976 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2977 (uint32_t)(bus_addr >> 32));
2978 E1000_WRITE_REG(hw, E1000_RDBAL(i),
2979 (uint32_t)bus_addr);
2980 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2981 /* Enable this Queue */
2982 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2983 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2984 rxdctl &= 0xFFF00000;
2985 rxdctl |= IGB_RX_PTHRESH;
2986 rxdctl |= IGB_RX_HTHRESH << 8;
2987 rxdctl |= IGB_RX_WTHRESH << 16;
2988 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2989 }
4729 }
2990 }
4730
4731 if (adapter->hw.mac.type >= e1000_pch2lan) {
4732 if (if_getmtu(ifp) > ETHERMTU)
4733 e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
4734 else
4735 e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
4736 }
4737
4738 /* Make sure VLAN Filters are off */

--- 9 unchanged lines hidden (view full) ---

4748	/* ensure the descriptor type (DTYPE) field is left clear, i.e. 00, here */
4749 rctl &= ~0x00000C00;
4750 /* Write out the settings */
4751 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4752
4753 return;
4754}
4755
4756
4757/*********************************************************************
4758 *
4759 * This routine executes in interrupt context. It replenishes
4760 * the mbufs in the descriptor ring and sends data which has been
4761 * DMA'ed into host memory to the upper layer.
4762 *
4763 * We loop at most count times if count is > 0, or until done if
4764 * count < 0.
4765 *
4766 * For polling we also now return the number of cleaned packets
4767 *********************************************************************/
4768static bool
4769em_rxeof(struct rx_ring *rxr, int count, int *done)
4770{
4771 struct adapter *adapter = rxr->adapter;
4772 if_t ifp = adapter->ifp;
4773 struct mbuf *mp, *sendmp;
4774 u32 status = 0;
4775 u16 len;
4776 int i, processed, rxdone = 0;
4777 bool eop;
4778 union e1000_rx_desc_extended *cur;
4779
4780 EM_RX_LOCK(rxr);
4781
4782 /* Sync the ring */
4783 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4784 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4785
4786
4787#ifdef DEV_NETMAP
4788 if (netmap_rx_irq(ifp, rxr->me, &processed)) {
4789 EM_RX_UNLOCK(rxr);
4790 return (FALSE);
4791 }
4792#endif /* DEV_NETMAP */
4793
4794 for (i = rxr->next_to_check, processed = 0; count != 0;) {
4795 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
4796 break;
4797
4798 cur = &rxr->rx_base[i];
4799 status = le32toh(cur->wb.upper.status_error);
4800 mp = sendmp = NULL;
4801
4802 if ((status & E1000_RXD_STAT_DD) == 0)
4803 break;
4804
4805 len = le16toh(cur->wb.upper.length);
4806 eop = (status & E1000_RXD_STAT_EOP) != 0;
4807
4808 if ((status & E1000_RXDEXT_ERR_FRAME_ERR_MASK) ||
4809 (rxr->discard == TRUE)) {
4810 adapter->dropped_pkts++;
4811 ++rxr->rx_discarded;
4812 if (!eop) /* Catch subsequent segs */
4813 rxr->discard = TRUE;
4814 else
4815 rxr->discard = FALSE;
4816 em_rx_discard(rxr, i);
4817 goto next_desc;
4818 }
4819 bus_dmamap_unload(rxr->rxtag, rxr->rx_buffers[i].map);
4820
4821 /* Assign correct length to the current fragment */
4822 mp = rxr->rx_buffers[i].m_head;
4823 mp->m_len = len;
4824
4825 /* Trigger for refresh */
4826 rxr->rx_buffers[i].m_head = NULL;
4827
4828 /* First segment? */
4829 if (rxr->fmp == NULL) {
4830 mp->m_pkthdr.len = len;
4831 rxr->fmp = rxr->lmp = mp;
4832 } else {
4833 /* Chain mbuf's together */
4834 mp->m_flags &= ~M_PKTHDR;
4835 rxr->lmp->m_next = mp;
4836 rxr->lmp = mp;
4837 rxr->fmp->m_pkthdr.len += len;
4838 }
4839
4840 if (eop) {
4841 --count;
4842 sendmp = rxr->fmp;
4843 if_setrcvif(sendmp, ifp);
4844 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
4845 em_receive_checksum(status, sendmp);
4846#ifndef __NO_STRICT_ALIGNMENT
4847 if (adapter->hw.mac.max_frame_size >
4848 (MCLBYTES - ETHER_ALIGN) &&
4849 em_fixup_rx(rxr) != 0)
4850 goto skip;
4851#endif
4852 if (status & E1000_RXD_STAT_VP) {
4853 if_setvtag(sendmp,
4854 le16toh(cur->wb.upper.vlan));
4855 sendmp->m_flags |= M_VLANTAG;
4856 }
4857#ifndef __NO_STRICT_ALIGNMENT
4858skip:
4859#endif
4860 rxr->fmp = rxr->lmp = NULL;
4861 }
4862next_desc:
4863 /* Sync the ring */
4864 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
4865 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4866
4867 /* Zero out the receive descriptors status. */
4868 cur->wb.upper.status_error &= htole32(~0xFF);
4869 ++rxdone; /* cumulative for POLL */
4870 ++processed;
4871
4872 /* Advance our pointers to the next descriptor. */
4873 if (++i == adapter->num_rx_desc)
4874 i = 0;
4875
4876 /* Send to the stack */
4877 if (sendmp != NULL) {
4878 rxr->next_to_check = i;
4879 EM_RX_UNLOCK(rxr);
4880 if_input(ifp, sendmp);
4881 EM_RX_LOCK(rxr);
4882 i = rxr->next_to_check;
4883 }
4884
4885 /* Only refresh mbufs every 8 descriptors */
4886 if (processed == 8) {
4887 em_refresh_mbufs(rxr, i);
4888 processed = 0;
4889 }
4890 }
4891
4892 /* Catch any remaining refresh work */
4893 if (e1000_rx_unrefreshed(rxr))
4894 em_refresh_mbufs(rxr, i);
4895
4896 rxr->next_to_check = i;
4897 if (done != NULL)
4898 *done = rxdone;
4899 EM_RX_UNLOCK(rxr);
4900
4901 return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
4902}
4903
4904static __inline void
4905em_rx_discard(struct rx_ring *rxr, int i)
4906{
4907 struct em_rxbuffer *rbuf;
4908
4909 rbuf = &rxr->rx_buffers[i];
4910 bus_dmamap_unload(rxr->rxtag, rbuf->map);
4911
4912 /* Free any previous pieces */
4913 if (rxr->fmp != NULL) {
4914 rxr->fmp->m_flags |= M_PKTHDR;
4915 m_freem(rxr->fmp);
4916 rxr->fmp = NULL;
4917 rxr->lmp = NULL;
4918 }
4919 /*
4920	** Free the buffer and allow em_refresh_mbufs()
4921	** to clean up and recharge the buffer.
4922 */
4923 if (rbuf->m_head) {
4924 m_free(rbuf->m_head);
4925 rbuf->m_head = NULL;
4926 }
4927 return;
4928}
4929
4930#ifndef __NO_STRICT_ALIGNMENT
4931/*
4932 * When jumbo frames are enabled we should realign the entire payload on
4933 * architectures with strict alignment. This is a serious design mistake of
4934 * the 8254x, as it nullifies DMA operations: the 8254x only allows the RX
4935 * buffer size to be 2048/4096/8192/16384, while what we really want is
4936 * 2048 - ETHER_ALIGN so that the payload is aligned. On architectures
4937 * without strict alignment restrictions the 8254x still performs unaligned
4938 * memory accesses, which reduces performance as well. To avoid copying an
4939 * entire frame just to align it, we allocate a new mbuf and copy only the
4940 * ethernet header into it. The new mbuf is then prepended to the existing
4941 * mbuf chain.
4942 * Be aware that the best performance of the 8254x is achieved only when jumbo
4943 * frames are not used at all on architectures with strict alignment.
4944 */
4945static int
4946em_fixup_rx(struct rx_ring *rxr)
4947{
4948 struct adapter *adapter = rxr->adapter;
4949 struct mbuf *m, *n;
4950 int error;
4951
4952 error = 0;
4953 m = rxr->fmp;
4954 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4955 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4956 m->m_data += ETHER_HDR_LEN;
4957 } else {
4958 MGETHDR(n, M_NOWAIT, MT_DATA);
4959 if (n != NULL) {
4960 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4961 m->m_data += ETHER_HDR_LEN;
4962 m->m_len -= ETHER_HDR_LEN;
4963 n->m_len = ETHER_HDR_LEN;
4964 M_MOVE_PKTHDR(n, m);
4965 n->m_next = m;
4966 rxr->fmp = n;
4967 } else {
4968 adapter->dropped_pkts++;
4969 m_freem(rxr->fmp);
4970 rxr->fmp = NULL;
4971 error = ENOMEM;
4972 }
4973 }
4974
4975 return (error);
4976}
4977#endif
4978
4979static void
3016static void
4980em_setup_rxdesc(union e1000_rx_desc_extended *rxd, const struct em_rxbuffer *rxbuf)
3017em_if_vlan_register(if_ctx_t ctx, u16 vtag)
4981{
3018{
4982 rxd->read.buffer_addr = htole64(rxbuf->paddr);
4983 /* DD bits must be cleared */
4984 rxd->wb.upper.status_error= 0;
4985}
4986
4987/*********************************************************************
4988 *
4989 * Verify that the hardware indicated that the checksum is valid.
4990 * Inform the stack about the status of the checksum so that the
4991 * stack doesn't spend time verifying it.
4992 *
4993 *********************************************************************/
4994static void
4995em_receive_checksum(uint32_t status, struct mbuf *mp)
4996{
4997 mp->m_pkthdr.csum_flags = 0;
4998
4999 /* Ignore Checksum bit is set */
5000 if (status & E1000_RXD_STAT_IXSM)
5001 return;
5002
5003 /* If the IP checksum exists and there is no IP Checksum error */
5004 if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
5005 E1000_RXD_STAT_IPCS) {
5006 mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
5007 }
5008
5009 /* TCP or UDP checksum */
5010 if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
5011 E1000_RXD_STAT_TCPCS) {
5012 mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5013 mp->m_pkthdr.csum_data = htons(0xffff);
5014 }
5015 if (status & E1000_RXD_STAT_UDPCS) {
5016 mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5017 mp->m_pkthdr.csum_data = htons(0xffff);
5018 }
5019}
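
/*
 * Illustration (added for clarity, not part of the original source): a
 * TCP/IPv4 frame whose IP and TCP checksums both validate arrives upstream
 * with csum_flags == CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR and csum_data == 0xffff, which the inet stack treats as
 * "already verified" and so skips its software checksum pass.
 */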
5020
5021/*
5022 * This routine is run via a vlan
5023 * config EVENT
5024 */
5025static void
5026em_register_vlan(void *arg, if_t ifp, u16 vtag)
5027{
5028 struct adapter *adapter = if_getsoftc(ifp);
3019 struct adapter *adapter = iflib_get_softc(ctx);
5029 u32 index, bit;
5030
5031 if ((void*)adapter != arg) /* Not our event */
5032 return;
5033
5034 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
5035 return;
5036
5037 EM_CORE_LOCK(adapter);
5038 index = (vtag >> 5) & 0x7F;
5039 bit = vtag & 0x1F;
5040 adapter->shadow_vfta[index] |= (1 << bit);
5041 ++adapter->num_vlans;
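 /*
  * Worked example (added for clarity): for vtag 100, index ==
  * 100 >> 5 == 3 and bit == 100 & 0x1F == 4, so bit 4 of shadow_vfta[3]
  * marks VLAN 100 in the 4096-bit filter table.
  */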
5042 /* Re-init to load the changes */
5043 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
5044 em_init_locked(adapter);
5045 EM_CORE_UNLOCK(adapter);
5046}
5047
3026}
3027
5048/*
5049 * This routine is run via a vlan
5050 * unconfig EVENT
5051 */
5052static void
3028static void
5053em_unregister_vlan(void *arg, if_t ifp, u16 vtag)
3029em_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
5054{
3030{
5055 struct adapter *adapter = if_getsoftc(ifp);
3031 struct adapter *adapter = iflib_get_softc(ctx);
5056 u32 index, bit;
5057
5058 if (adapter != arg)
5059 return;
5060
5061 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
5062 return;
5063
5064 EM_CORE_LOCK(adapter);
5065 index = (vtag >> 5) & 0x7F;
5066 bit = vtag & 0x1F;
5067 adapter->shadow_vfta[index] &= ~(1 << bit);
5068 --adapter->num_vlans;
5069 /* Re-init to load the changes */
5070 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
5071 em_init_locked(adapter);
5072 EM_CORE_UNLOCK(adapter);
5073}
5074
5075static void
5076em_setup_vlan_hw_support(struct adapter *adapter)
5077{
5078 struct e1000_hw *hw = &adapter->hw;
5079 u32 reg;
5080

--- 22 unchanged lines hidden (view full) ---

5103 /* Enable the Filter Table */
5104 reg = E1000_READ_REG(hw, E1000_RCTL);
5105 reg &= ~E1000_RCTL_CFIEN;
5106 reg |= E1000_RCTL_VFE;
5107 E1000_WRITE_REG(hw, E1000_RCTL, reg);
5108}
5109
5110static void
3038}
3039
3075static void
5111em_enable_intr(struct adapter *adapter)
3076em_if_enable_intr(if_ctx_t ctx)
5112{
3077{
3078 struct adapter *adapter = iflib_get_softc(ctx);
5113 struct e1000_hw *hw = &adapter->hw;
5114 u32 ims_mask = IMS_ENABLE_MASK;
5115
5116 if (hw->mac.type == e1000_82574) {
5117 E1000_WRITE_REG(hw, EM_EIAC, adapter->ims);
5118 ims_mask |= adapter->ims;
5119 }
3085 } if (adapter->intr_type == IFLIB_INTR_MSIX && hw->mac.type >= igb_mac_min) {
3086 u32 mask = (adapter->que_mask | adapter->link_mask);
3087
3088 E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
3089 E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
3090 E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
3091 ims_mask = E1000_IMS_LSC;
3092 }
3093
5120 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
5121}
5122
5123static void
5124em_disable_intr(struct adapter *adapter)
3098em_if_disable_intr(if_ctx_t ctx)
5125{
3099{
3100 struct adapter *adapter = iflib_get_softc(ctx);
5126 struct e1000_hw *hw = &adapter->hw;
5127
5128 if (hw->mac.type == e1000_82574)
5129 E1000_WRITE_REG(hw, EM_EIAC, 0);
3103 if (adapter->intr_type == IFLIB_INTR_MSIX) {
3104 if (hw->mac.type >= igb_mac_min)
3105 E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
3106 E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
3107 }
5130 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
5131}
5132
5133/*
5134 * A bit of a misnomer: what this really means is
5135 * to enable OS management of the system, aka
5136 * to disable special hardware management features
5137 */

--- 102 unchanged lines hidden (view full) ---

5240}
5241
5242/*
5243** Parse the interface capabilities with regard
5244** to both system management and wake-on-lan for
5245** later use.
5246*/
5247static void
5248em_get_wakeup(device_t dev)
3226em_get_wakeup(if_ctx_t ctx)
5249{
3227{
5250 struct adapter *adapter = device_get_softc(dev);
3228 struct adapter *adapter = iflib_get_softc(ctx);
3229 device_t dev = iflib_get_dev(ctx);
5251 u16 eeprom_data = 0, device_id, apme_mask;
5252
5253 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
5254 apme_mask = EM_EEPROM_APME;
5255
5256 switch (adapter->hw.mac.type) {
3236 case e1000_82542:
3237 case e1000_82543:
3238 break;
3239 case e1000_82544:
3240 e1000_read_nvm(&adapter->hw,
3241 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
3242 apme_mask = EM_82544_APME;
3243 break;
3244 case e1000_82546:
3245 case e1000_82546_rev_3:
3246 if (adapter->hw.bus.func == 1) {
3247 e1000_read_nvm(&adapter->hw,
3248 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3249 break;
3250 } else
3251 e1000_read_nvm(&adapter->hw,
3252 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3253 break;
5257 case e1000_82573:
5258 case e1000_82583:
5259 adapter->has_amt = TRUE;
5260 /* Falls thru */
5261 case e1000_82571:
5262 case e1000_82572:
5263 case e1000_80003es2lan:
5264 if (adapter->hw.bus.func == 1) {

--- 4 unchanged lines hidden (view full) ---

5269 e1000_read_nvm(&adapter->hw,
5270 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
5271 break;
5272 case e1000_ich8lan:
5273 case e1000_ich9lan:
5274 case e1000_ich10lan:
5275 case e1000_pchlan:
5276 case e1000_pch2lan:
5277 case e1000_pch_lpt:
5278 case e1000_pch_spt:
5279 apme_mask = E1000_WUC_APME;
5280 adapter->has_amt = TRUE;
5281 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
5282 break;
5283 default:
5284 e1000_read_nvm(&adapter->hw,
5285 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
5286 break;
5287 }
5288 if (eeprom_data & apme_mask)
5289 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
5290 /*
5291 * We have the eeprom settings, now apply the special cases
5292 * where the eeprom may be wrong or the board won't support
5293 * wake on lan on a particular port
5294 */
5295 device_id = pci_get_device(dev);
5296 switch (device_id) {
3292 case E1000_DEV_ID_82546GB_PCIE:
3293 adapter->wol = 0;
3294 break;
3295 case E1000_DEV_ID_82546EB_FIBER:
3296 case E1000_DEV_ID_82546GB_FIBER:
3297 /* Wake events only supported on port A for dual fiber
3298 * regardless of eeprom setting */
3299 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
3300 E1000_STATUS_FUNC_1)
3301 adapter->wol = 0;
3302 break;
3303 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
3304 /* if quad port adapter, disable WoL on all but port A */
3305 if (global_quad_port_a != 0)
3306 adapter->wol = 0;
3307 /* Reset for multiple quad port adapters */
3308 if (++global_quad_port_a == 4)
3309 global_quad_port_a = 0;
3310 break;
5297 case E1000_DEV_ID_82571EB_FIBER:
5298 /* Wake events only supported on port A for dual fiber
5299 * regardless of eeprom setting */
5300 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
5301 E1000_STATUS_FUNC_1)
5302 adapter->wol = 0;
5303 break;
5304 case E1000_DEV_ID_82571EB_QUAD_COPPER:

--- 10 unchanged lines hidden (view full) ---

5315 return;
5316}
5317
5318
5319/*
5320 * Enable PCI Wake On Lan capability
5321 */
5322static void
5323em_enable_wakeup(device_t dev)
3337em_enable_wakeup(if_ctx_t ctx)
5324{
3338{
5325 struct adapter *adapter = device_get_softc(dev);
5326 if_t ifp = adapter->ifp;
5327 u32 pmc, ctrl, ctrl_ext, rctl, wuc;
3339 struct adapter *adapter = iflib_get_softc(ctx);
3340 device_t dev = iflib_get_dev(ctx);
3341 if_t ifp = iflib_get_ifp(ctx);
3342 u32 pmc, ctrl, ctrl_ext, rctl;
5328 u16 status;
5329
5330 if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0))
5331 return;
5332
5333 /* Advertise the wakeup capability */
5334 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
5335 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
5336 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
5337 wuc = E1000_READ_REG(&adapter->hw, E1000_WUC);
5338 wuc |= E1000_WUC_PME_EN;
5339 E1000_WRITE_REG(&adapter->hw, E1000_WUC, wuc);
3352 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
5340
5341 if ((adapter->hw.mac.type == e1000_ich8lan) ||
5342 (adapter->hw.mac.type == e1000_pchlan) ||
5343 (adapter->hw.mac.type == e1000_ich9lan) ||
5344 (adapter->hw.mac.type == e1000_ich10lan))
5345 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5346
5347 /* Keep the laser running on Fiber adapters */

--- 14 unchanged lines hidden (view full) ---

5362 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
5363 adapter->wol &= ~E1000_WUFC_MC;
5364 else {
5365 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
5366 rctl |= E1000_RCTL_MPE;
5367 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
5368 }
5369
5370 if ((adapter->hw.mac.type == e1000_pchlan) ||
5371 (adapter->hw.mac.type == e1000_pch2lan) ||
5372 (adapter->hw.mac.type == e1000_pch_lpt) ||
5373 (adapter->hw.mac.type == e1000_pch_spt)) {
3383 if ((adapter->hw.mac.type == e1000_pchlan) ||
3384 (adapter->hw.mac.type == e1000_pch2lan)) {
5374 if (em_enable_phy_wakeup(adapter))
5375 return;
5376 } else {
5377 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
5378 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
5379 }
5380
5381 if (adapter->hw.phy.type == e1000_phy_igp_3)

--- 79 unchanged lines hidden (view full) ---

5461 printf("Could not set PHY Host Wakeup bit\n");
5462out:
5463 hw->phy.ops.release(hw);
5464
5465 return ret;
5466}
5467
5468static void
5469em_led_func(void *arg, int onoff)
3480em_if_led_func(if_ctx_t ctx, int onoff)
5470{
3481{
5471 struct adapter *adapter = arg;
3482 struct adapter *adapter = iflib_get_softc(ctx);
5472
3483
5473 EM_CORE_LOCK(adapter);
5474 if (onoff) {
5475 e1000_setup_led(&adapter->hw);
5476 e1000_led_on(&adapter->hw);
5477 } else {
5478 e1000_led_off(&adapter->hw);
5479 e1000_cleanup_led(&adapter->hw);
5480 }
5481 EM_CORE_UNLOCK(adapter);
5482}
5483
5484/*
5485** Disable the L0S and L1 LINK states
5486*/
5487static void
5488em_disable_aspm(struct adapter *adapter)
5489{

--- 114 unchanged lines hidden (view full) ---

5604 adapter->stats.tsctc +=
5605 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5606 adapter->stats.tsctfc +=
5607 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5608 }
5609}
5610
5611static uint64_t
5612em_get_counter(if_t ifp, ift_counter cnt)
3621em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
5613{
3622{
5614 struct adapter *adapter;
3623 struct adapter *adapter = iflib_get_softc(ctx);
3624 struct ifnet *ifp = iflib_get_ifp(ctx);
5615
3625
5616 adapter = if_getsoftc(ifp);
5617
5618 switch (cnt) {
5619 case IFCOUNTER_COLLISIONS:
5620 return (adapter->stats.colc);
5621 case IFCOUNTER_IERRORS:
5622 return (adapter->dropped_pkts + adapter->stats.rxerrc +
5623 adapter->stats.crcerrs + adapter->stats.algnerrc +
5624 adapter->stats.ruc + adapter->stats.roc +
5625 adapter->stats.mpc + adapter->stats.cexterr);

--- 18 unchanged lines hidden (view full) ---

5644}
5645
5646/*
5647 * Add sysctl variables, one per statistic, to the system.
5648 */
5649static void
5650em_add_hw_stats(struct adapter *adapter)
5651{
5652 device_t dev = adapter->dev;
5653
5654 struct tx_ring *txr = adapter->tx_rings;
5655 struct rx_ring *rxr = adapter->rx_rings;
5656
3660 device_t dev = iflib_get_dev(adapter->ctx);
3661 struct em_tx_queue *tx_que = adapter->tx_queues;
3662 struct em_rx_queue *rx_que = adapter->rx_queues;
3663
5657 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
5658 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
5659 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
5660 struct e1000_hw_stats *stats = &adapter->stats;
5661
5662 struct sysctl_oid *stat_node, *queue_node, *int_node;
5663 struct sysctl_oid_list *stat_list, *queue_list, *int_list;
5664

--- 30 unchanged lines hidden (view full) ---

5695 "Receiver Control Register");
5696 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
5697 CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
5698 "Flow Control High Watermark");
5699 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
5700 CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
5701 "Flow Control Low Watermark");
5702
5703 for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
3710 for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
3711 struct tx_ring *txr = &tx_que->txr;
5704 snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
5705 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5706 CTLFLAG_RD, NULL, "TX Queue Name");
5707 queue_list = SYSCTL_CHILDREN(queue_node);
5708
5709 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
5710 CTLTYPE_UINT | CTLFLAG_RD, adapter,
5711 E1000_TDH(txr->me),

--- 5 unchanged lines hidden (view full) ---

5717 em_sysctl_reg_handler, "IU",
5718 "Transmit Descriptor Tail");
5719 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
5720 CTLFLAG_RD, &txr->tx_irq,
5721 "Queue MSI-X Transmit Interrupts");
5722 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail",
5723 CTLFLAG_RD, &txr->no_desc_avail,
5724 "Queue No Descriptor Available");
3733 }
5725
3734
5726 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", i);
3735 for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) {
3736 struct rx_ring *rxr = &rx_que->rxr;
3737 snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
5727 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
5728 CTLFLAG_RD, NULL, "RX Queue Name");
5729 queue_list = SYSCTL_CHILDREN(queue_node);
5730
5731 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
5732 CTLTYPE_UINT | CTLFLAG_RD, adapter,
5733 E1000_RDH(rxr->me),
5734 em_sysctl_reg_handler, "IU",

--- 270 unchanged lines hidden (view full) ---

6005 return (EINVAL);
6006 info->value = usecs;
6007 ticks = EM_USECS_TO_TICKS(usecs);
6008 if (info->offset == E1000_ITR) /* units are 256ns here */
6009 ticks *= 4;
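 /*
  * Worked example (added for clarity; assumes the usual
  * EM_USECS_TO_TICKS() definition of (1024 * usecs + 500) / 1000): a
  * request of 125 usecs becomes 128 ticks of 1.024us, scaled by 4 for
  * E1000_ITR to 512 ticks of 256ns -- the same ~131us interval.
  */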
6010
6011 adapter = info->adapter;
6012
6013 EM_CORE_LOCK(adapter);
6014 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
6015 regval = (regval & ~0xffff) | (ticks & 0xffff);
6016 /* Handle a few special cases. */
6017 switch (info->offset) {
6018 case E1000_RDTR:
6019 break;
6020 case E1000_TIDV:
6021 if (ticks == 0) {
6022 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
6023 /* Don't write 0 into the TIDV register. */
6024 regval++;
6025 } else
6026 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
6027 break;
6028 }
6029 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
6030 EM_CORE_UNLOCK(adapter);
6031 return (0);
6032}
6033
6034static void
6035em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
6036 const char *description, struct em_int_delay_info *info,
6037 int offset, int value)
6038{

--- 68 unchanged lines hidden (view full) ---

6107{
6108 struct adapter *adapter = (struct adapter *) arg1;
6109 int error, value;
6110
6111 value = adapter->hw.dev_spec.ich8lan.eee_disable;
6112 error = sysctl_handle_int(oidp, &value, 0, req);
6113 if (error || req->newptr == NULL)
6114 return (error);
6115 EM_CORE_LOCK(adapter);
6116 adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0);
6117 em_init_locked(adapter);
6118 EM_CORE_UNLOCK(adapter);
4125 em_if_init(adapter->ctx);
4126
6119 return (0);
6120}
6121
6122static int
6123em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
6124{
6125 struct adapter *adapter;
6126 int error;

--- 16 unchanged lines hidden (view full) ---

6143/*
6144** This routine is meant to be fluid; add whatever is
6145** needed for debugging a problem. -jfv
6146*/
6147static void
6148em_print_debug_info(struct adapter *adapter)
6149{
6150 device_t dev = adapter->dev;
6151 struct tx_ring *txr = adapter->tx_rings;
6152 struct rx_ring *rxr = adapter->rx_rings;
4159 struct tx_ring *txr = &adapter->tx_queues->txr;
4160 struct rx_ring *rxr = &adapter->rx_queues->rxr;
6153
6154 if (if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING)
6155 printf("Interface is RUNNING ");
6156 else
6157 printf("Interface is NOT RUNNING\n");
6158
6159 if (if_getdrvflags(adapter->ifp) & IFF_DRV_OACTIVE)
6160 printf("and INACTIVE\n");
6161 else
6162 printf("and ACTIVE\n");
6163
6164 for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
4172 for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
6165 device_printf(dev, "TX Queue %d ------\n", i);
6166 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
6167 E1000_READ_REG(&adapter->hw, E1000_TDH(i)),
6168 E1000_READ_REG(&adapter->hw, E1000_TDT(i)));
6169 device_printf(dev, "Tx Queue Status = %d\n", txr->busy);
6170 device_printf(dev, "TX descriptors avail = %d\n",
6171 txr->tx_avail);
6172 device_printf(dev, "Tx Descriptors avail failure = %ld\n",
6173 txr->no_desc_avail);
6174 device_printf(dev, "RX Queue %d ------\n", i);
4177
4178 }
4179 for (int j=0; j < adapter->rx_num_queues; j++, rxr++) {
4180 device_printf(dev, "RX Queue %d ------\n", j);
6175 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
6176 E1000_READ_REG(&adapter->hw, E1000_RDH(i)),
6177 E1000_READ_REG(&adapter->hw, E1000_RDT(i)));
6178 device_printf(dev, "RX discarded packets = %ld\n",
6179 rxr->rx_discarded);
6180 device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check);
6181 device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh);
4182 E1000_READ_REG(&adapter->hw, E1000_RDH(j)),
4183 E1000_READ_REG(&adapter->hw, E1000_RDT(j)));
6182 }
6183}
6184
4184 }
4185}
4186
6185#ifdef EM_MULTIQUEUE
4187
6186/*
6187 * 82574 only:
6188 * Write a new value to the EEPROM increasing the number of MSIX
6189 * vectors from 3 to 5, for proper multiqueue support.
6190 */
6191static void
6192em_enable_vectors_82574(struct adapter *adapter)
4194em_enable_vectors_82574(if_ctx_t ctx)
6193{
4195{
4196 struct adapter *adapter = iflib_get_softc(ctx);
6194 struct e1000_hw *hw = &adapter->hw;
6195 device_t dev = adapter->dev;
4198 device_t dev = iflib_get_dev(ctx);
6196 u16 edata;
6197
6198 e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
6199 printf("Current cap: %#06x\n", edata);
6200 if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) {
6201 device_printf(dev, "Writing to eeprom: increasing "
6202 "reported MSIX vectors from 3 to 5...\n");
6203 edata &= ~(EM_NVM_MSIX_N_MASK);
6204 edata |= 4 << EM_NVM_MSIX_N_SHIFT;
6205 e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata);
6206 e1000_update_nvm_checksum(hw);
6207 device_printf(dev, "Writing to eeprom: done\n");
6208 }
6209}
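
/*
 * Note (added for clarity): the EM_NVM_MSIX_N field appears to encode
 * "vectors - 1", so the stock value of 2 advertises 3 vectors while the 4
 * written above advertises 5 -- enough for two RX queues, two TX queues,
 * and a link interrupt.
 */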
6210#endif
6211
4213
4214
6212#ifdef DDB
6213DB_COMMAND(em_reset_dev, em_ddb_reset_dev)
6214{
6215 devclass_t dc;
6216 int max_em;
6217
6218 dc = devclass_find("em");
6219 max_em = devclass_get_maxunit(dc);
6220
6221 for (int index = 0; index < (max_em - 1); index++) {
6222 device_t dev;
6223 dev = devclass_get_device(dc, index);
6224 if (device_get_driver(dev) == &em_driver) {
6225 struct adapter *adapter = device_get_softc(dev);
6226 EM_CORE_LOCK(adapter);
6227 em_init_locked(adapter);
6228 EM_CORE_UNLOCK(adapter);
4229 em_if_init(adapter->ctx);
6229 }
6230 }
6231}
6232DB_COMMAND(em_dump_queue, em_ddb_dump_queue)
6233{
6234 devclass_t dc;
6235 int max_em;
6236

--- 12 unchanged lines hidden ---