/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_vmac.h>

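/* Function prototypes */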
hxge_status_t hxge_vmac_init(p_hxge_t hxgep);
hxge_status_t hxge_tx_vmac_init(p_hxge_t hxgep);
hxge_status_t hxge_rx_vmac_init(p_hxge_t hxgep);
hxge_status_t hxge_tx_vmac_enable(p_hxge_t hxgep);
hxge_status_t hxge_tx_vmac_disable(p_hxge_t hxgep);
hxge_status_t hxge_rx_vmac_enable(p_hxge_t hxgep);
hxge_status_t hxge_rx_vmac_disable(p_hxge_t hxgep);
hxge_status_t hxge_tx_vmac_reset(p_hxge_t hxgep);
hxge_status_t hxge_rx_vmac_reset(p_hxge_t hxgep);
uint_t hxge_vmac_intr(caddr_t arg1, caddr_t arg2);
hxge_status_t hxge_set_promisc(p_hxge_t hxgep, boolean_t on);

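/* Initialize the link statistics; the link is always 10Gb full duplex */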
hxge_status_t
hxge_link_init(p_hxge_t hxgep)
{
	p_hxge_stats_t		statsp;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_link_init"));

	statsp = hxgep->statsp;

	statsp->mac_stats.cap_10gfdx = 1;
	statsp->mac_stats.lp_cap_10gfdx = 1;

	/*
	 * The driver doesn't control the link.
	 * It is always 10Gb full duplex.
	 */
	statsp->mac_stats.link_duplex = 2;
	statsp->mac_stats.link_speed = 10000;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_link_init"));
	return (HXGE_OK);
}

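/*
 * Reset and enable the TX and RX VMAC sub-blocks, then clear the interrupt
 * status registers and unmask the overflow-counter interrupts.
 */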
hxge_status_t
hxge_vmac_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_vmac_init:"));

	if ((status = hxge_tx_vmac_reset(hxgep)) != HXGE_OK)
		goto fail;

	if ((status = hxge_rx_vmac_reset(hxgep)) != HXGE_OK)
		goto fail;

	if ((status = hxge_tx_vmac_enable(hxgep)) != HXGE_OK)
		goto fail;

	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK)
		goto fail;

	/* Clear the interrupt status registers */
	(void) hpi_vmac_clear_rx_int_stat(hxgep->hpi_handle);
	(void) hpi_vmac_clear_tx_int_stat(hxgep->hpi_handle);

	/*
	 * Take the masks off the overflow counters so the system is
	 * interrupted only when a counter overflows, not on every frame.
	 * The current counts are retrieved when the "kstat" command is used.
	 */
	(void) hpi_pfc_set_rx_int_stat_mask(hxgep->hpi_handle, 0, 1);
	(void) hpi_pfc_set_tx_int_stat_mask(hxgep->hpi_handle, 0, 1);

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_vmac_init:"));

	return (HXGE_OK);
fail:
	HXGE_DEBUG_MSG((hxgep, MAC_CTL,
	    "hxge_vmac_init: failed to initialize VMAC"));

	return (status);
}


/* Initialize the TxVMAC sub-block */

hxge_status_t
hxge_tx_vmac_init(p_hxge_t hxgep)
{
	uint64_t	config;
	hpi_handle_t	handle = hxgep->hpi_handle;

	/* CFG_VMAC_TX_EN is done separately */
	config = CFG_VMAC_TX_CRC_INSERT | CFG_VMAC_TX_PAD;

	if (hpi_vmac_tx_config(handle, INIT, config,
	    hxgep->vmac.maxframesize) != HPI_SUCCESS)
		return (HXGE_ERROR);

	hxgep->vmac.tx_config = config;

	return (HXGE_OK);
}

/* Initialize the RxVMAC sub-block */

hxge_status_t
hxge_rx_vmac_init(p_hxge_t hxgep)
{
	uint64_t	xconfig;
	hpi_handle_t	handle = hxgep->hpi_handle;
	uint16_t	max_frame_length = hxgep->vmac.maxframesize;

	/*
	 * NOTE: CFG_VMAC_RX_EN is done separately. Do not enable CRC
	 * stripping; per Bug ID 11451, stripping the CRC causes
	 * minimum-sized packets to be rejected.
	 */
	xconfig = CFG_VMAC_RX_PASS_FLOW_CTRL_FR;

	if (hxgep->filter.all_phys_cnt != 0)
		xconfig |= CFG_VMAC_RX_PROMISCUOUS_MODE;

	if (hxgep->filter.all_multicast_cnt != 0)
		xconfig |= CFG_VMAC_RX_PROMIXCUOUS_GROUP;

	if (hxgep->statsp->port_stats.lb_mode != hxge_lb_normal)
		xconfig |= CFG_VMAC_RX_LOOP_BACK;

	if (hpi_vmac_rx_config(handle, INIT, xconfig, max_frame_length)
	    != HPI_SUCCESS)
		return (HXGE_ERROR);

	hxgep->vmac.rx_config = xconfig;

	return (HXGE_OK);
}

/* Enable TxVMAC */

hxge_status_t
hxge_tx_vmac_enable(p_hxge_t hxgep)
{
	hpi_status_t	rv;
	hxge_status_t	status = HXGE_OK;
	hpi_handle_t	handle = hxgep->hpi_handle;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_tx_vmac_enable"));

	rv = hxge_tx_vmac_init(hxgep);
	if (rv != HXGE_OK)
		return (rv);

	/* Based on speed */
	hxgep->msg_min = ETHERMIN;

	rv = hpi_vmac_tx_config(handle, ENABLE, CFG_VMAC_TX_EN, 0);

	status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_tx_vmac_enable"));

	return (status);
}

/* Disable TxVMAC */

hxge_status_t
hxge_tx_vmac_disable(p_hxge_t hxgep)
{
	hpi_status_t	rv;
	hxge_status_t	status = HXGE_OK;
	hpi_handle_t	handle = hxgep->hpi_handle;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_tx_vmac_disable"));

	rv = hpi_vmac_tx_config(handle, DISABLE, CFG_VMAC_TX_EN, 0);

	status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_tx_vmac_disable"));

	return (status);
}

/* Enable RxVMAC */

hxge_status_t
hxge_rx_vmac_enable(p_hxge_t hxgep)
{
	hpi_status_t	rv;
	hxge_status_t	status = HXGE_OK;
	hpi_handle_t	handle = hxgep->hpi_handle;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_rx_vmac_enable"));

	rv = hxge_rx_vmac_init(hxgep);
	if (rv != HXGE_OK)
		return (rv);

	rv = hpi_vmac_rx_config(handle, ENABLE, CFG_VMAC_RX_EN, 0);

	status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_enable"));

	return (status);
}

/* Disable RxVMAC */

hxge_status_t
hxge_rx_vmac_disable(p_hxge_t hxgep)
{
	hpi_status_t	rv;
	hxge_status_t	status = HXGE_OK;
	hpi_handle_t	handle = hxgep->hpi_handle;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_rx_vmac_disable"));

	rv = hpi_vmac_rx_config(handle, DISABLE, CFG_VMAC_RX_EN, 0);

	status = (rv == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_rx_vmac_disable"));

	return (status);
}

/* Reset TxVMAC */

hxge_status_t
hxge_tx_vmac_reset(p_hxge_t hxgep)
{
	hpi_handle_t	handle = hxgep->hpi_handle;

	(void) hpi_tx_vmac_reset(handle);

	return (HXGE_OK);
}

/* Reset RxVMAC */

hxge_status_t
hxge_rx_vmac_reset(p_hxge_t hxgep)
{
	hpi_handle_t	handle = hxgep->hpi_handle;

	(void) hpi_rx_vmac_reset(handle);

	return (HXGE_OK);
}

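/* VMAC interrupt handler: snapshot counters and clear interrupt status */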
/*ARGSUSED*/
uint_t
hxge_vmac_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_t	hxgep = (p_hxge_t)arg2;
	hpi_handle_t	handle;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_vmac_intr"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	hxge_save_cntrs(hxgep);

	/* Clear the interrupt status registers */
	(void) hpi_vmac_clear_rx_int_stat(handle);
	(void) hpi_vmac_clear_tx_int_stat(handle);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_vmac_intr"));
	return (DDI_INTR_CLAIMED);
}

/*
 * Set promiscuous mode
 */
hxge_status_t
hxge_set_promisc(p_hxge_t hxgep, boolean_t on)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_set_promisc: on %d", on));

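	/*
	 * A non-zero all_phys_cnt makes hxge_rx_vmac_init() set
	 * CFG_VMAC_RX_PROMISCUOUS_MODE when the RxVMAC is re-enabled below.
	 */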
	hxgep->filter.all_phys_cnt = ((on) ? 1 : 0);

	RW_ENTER_WRITER(&hxgep->filter_lock);
	if ((status = hxge_rx_vmac_disable(hxgep)) != HXGE_OK)
		goto fail;
	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK)
		goto fail;
	RW_EXIT(&hxgep->filter_lock);

	if (on)
		hxgep->statsp->mac_stats.promisc = B_TRUE;
	else
		hxgep->statsp->mac_stats.promisc = B_FALSE;

	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_set_promisc"));
	return (HXGE_OK);

fail:
	RW_EXIT(&hxgep->filter_lock);

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_set_promisc: "
	    "Unable to set promisc (%d)", on));
	return (status);
}

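/* Accumulate the VMAC hardware counters into the driver statistics */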
void
hxge_save_cntrs(p_hxge_t hxgep)
{
	p_hxge_stats_t	statsp;
	hpi_handle_t	handle;

	vmac_tx_frame_cnt_t tx_frame_cnt;
	vmac_tx_byte_cnt_t tx_byte_cnt;
	vmac_rx_frame_cnt_t rx_frame_cnt;
	vmac_rx_byte_cnt_t rx_byte_cnt;
	vmac_rx_drop_fr_cnt_t rx_drop_fr_cnt;
	vmac_rx_drop_byte_cnt_t rx_drop_byte_cnt;
	vmac_rx_crc_cnt_t rx_crc_cnt;
	vmac_rx_pause_cnt_t rx_pause_cnt;
	vmac_rx_bcast_fr_cnt_t rx_bcast_fr_cnt;
	vmac_rx_mcast_fr_cnt_t rx_mcast_fr_cnt;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_save_cntrs"));

	statsp = (p_hxge_stats_t)hxgep->statsp;
	handle = hxgep->hpi_handle;

	HXGE_REG_RD64(handle, VMAC_TX_FRAME_CNT, &tx_frame_cnt.value);
	HXGE_REG_RD64(handle, VMAC_TX_BYTE_CNT, &tx_byte_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_FRAME_CNT, &rx_frame_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_BYTE_CNT, &rx_byte_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_DROP_FR_CNT, &rx_drop_fr_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_DROP_BYTE_CNT, &rx_drop_byte_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_CRC_CNT, &rx_crc_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_PAUSE_CNT, &rx_pause_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_BCAST_FR_CNT, &rx_bcast_fr_cnt.value);
	HXGE_REG_RD64(handle, VMAC_RX_MCAST_FR_CNT, &rx_mcast_fr_cnt.value);

	statsp->vmac_stats.tx_frame_cnt += tx_frame_cnt.bits.tx_frame_cnt;
	statsp->vmac_stats.tx_byte_cnt += tx_byte_cnt.bits.tx_byte_cnt;
	statsp->vmac_stats.rx_frame_cnt += rx_frame_cnt.bits.rx_frame_cnt;
	statsp->vmac_stats.rx_byte_cnt += rx_byte_cnt.bits.rx_byte_cnt;
	statsp->vmac_stats.rx_drop_frame_cnt +=
	    rx_drop_fr_cnt.bits.rx_drop_frame_cnt;
	statsp->vmac_stats.rx_drop_byte_cnt +=
	    rx_drop_byte_cnt.bits.rx_drop_byte_cnt;
	statsp->vmac_stats.rx_crc_cnt += rx_crc_cnt.bits.rx_crc_cnt;
	statsp->vmac_stats.rx_pause_cnt += rx_pause_cnt.bits.rx_pause_cnt;
	statsp->vmac_stats.rx_bcast_fr_cnt +=
	    rx_bcast_fr_cnt.bits.rx_bcast_fr_cnt;
	statsp->vmac_stats.rx_mcast_fr_cnt +=
	    rx_mcast_fr_cnt.bits.rx_mcast_fr_cnt;

hxge_save_cntrs_exit:
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_save_cntrs"));
}

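/*
 * Apply a new maximum frame size by disabling and re-enabling both VMACs.
 */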
int
hxge_vmac_set_framesize(p_hxge_t hxgep)
{
	int	status = 0;

	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "==> hxge_vmac_set_framesize"));

	RW_ENTER_WRITER(&hxgep->filter_lock);
	(void) hxge_rx_vmac_disable(hxgep);
	(void) hxge_tx_vmac_disable(hxgep);

	/*
	 * Apply the new jumbo frame size held in the hxge data structure
	 * (hxgep->vmac.maxframesize). The order of the following two calls
	 * is important.
	 */
	(void) hxge_tx_vmac_enable(hxgep);
	(void) hxge_rx_vmac_enable(hxgep);
	RW_EXIT(&hxgep->filter_lock);

	HXGE_DEBUG_MSG((hxgep, NDD_CTL, "<== hxge_vmac_set_framesize"));
	return (status);
}