/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Functions for RGMII/GMII/MII initialization, configuration,
 * and monitoring.
 */
#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>

#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>

#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>

void __cvmx_interrupt_gmxx_enable(int interface);
void __cvmx_interrupt_asxx_enable(int block);

/**
 * Probe RGMII ports and determine the number present
 *
 * @interface: Interface to probe
 *
 * Returns Number of RGMII/GMII/MII ports (0-4).
 */
int __cvmx_helper_rgmii_probe(int interface)
{
	int num_ports = 0;
	union cvmx_gmxx_inf_mode mode;
	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (mode.s.type) {
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			cvmx_dprintf("ERROR: RGMII initialization called on a "
				     "SPI interface\n");
		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
			   || OCTEON_IS_MODEL(OCTEON_CN30XX)
			   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			/*
			 * On these chips "type" says we're in
			 * GMII/MII mode, which limits us to 2 ports.
			 */
			num_ports = 2;
		} else {
			cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
				     __func__);
		}
	} else {
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			num_ports = 4;
		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
			   || OCTEON_IS_MODEL(OCTEON_CN30XX)
			   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			num_ports = 3;
		} else {
			cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
				     __func__);
		}
	}
	return num_ports;
}

/**
 * Put an RGMII port in loopback mode. Packets sent out the port are
 * received back on the same port, and packets received from the wire
 * are echoed back out.
 *
 * @port:   IPD port number to loop.
 */
void cvmx_helper_rgmii_internal_loopback(int port)
{
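	/*
	 * IPD port numbering, as used throughout this file: ports 0-15 sit
	 * on interface 0 and ports 16-31 on interface 1, so bit 4 selects
	 * the interface and the low nibble is the index within it.
	 */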
	int interface = (port >> 4) & 1;
	int index = port & 0xf;
	uint64_t tmp;

	union cvmx_gmxx_prtx_cfg gmx_cfg;
	gmx_cfg.u64 = 0;
	gmx_cfg.s.duplex = 1;
	gmx_cfg.s.slottime = 1;
	gmx_cfg.s.speed = 1;
	cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
	cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
	tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
	tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
}

/**
 * Workaround ASX setup errata with CN38XX pass1
 *
 * @interface: Interface to setup
 * @port:      Port to setup (0..3)
 * @cpu_clock_hz:
 *		    Chip frequency in Hertz
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_errata_asx_pass1(int interface, int port,
					  int cpu_clock_hz)
{
	/* Set hi water mark as per errata GMX-4 */
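	/*
	 * Worked example: a 500 MHz core clock falls in the 437-550 MHz
	 * bucket below, so the TX hi-water mark is programmed to 10.
	 * Clocks outside the 325-687 MHz range leave the register at its
	 * reset value; the function still returns zero in that case.
	 */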
	if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
	else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
	else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
	else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
	else
		cvmx_dprintf("Illegal clock frequency (%d). "
			"CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
	return 0;
}

/**
 * Configure all of the ASX, GMX, and PKO registers required
 * to get RGMII to function on the supplied interface.
 *
 * @interface: PKO Interface to configure (0 or 1)
 *
 * Returns Zero on success
 */
int __cvmx_helper_rgmii_enable(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int port;
	struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
	union cvmx_gmxx_inf_mode mode;
	union cvmx_asxx_tx_prt_en asx_tx;
	union cvmx_asxx_rx_prt_en asx_rx;

	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (mode.s.en == 0)
		return -1;
	if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
	     OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
		/* Ignore SPI interfaces */
		return -1;

	/* Configure the ASX registers needed to use the RGMII ports */
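	/*
	 * cvmx_build_mask(num_ports) is assumed to build a mask with the low
	 * num_ports bits set, so ports 0..num_ports-1 are enabled in both
	 * the TX and RX directions below.
	 */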
	asx_tx.u64 = 0;
	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);

	asx_rx.u64 = 0;
	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);

	/* Configure the GMX registers needed to use the RGMII ports */
	for (port = 0; port < num_ports; port++) {
		/* Setting of CVMX_GMXX_TXX_THRESH has been moved to
		   __cvmx_helper_setup_gmx() */

		if (cvmx_octeon_is_pass1())
			__cvmx_helper_errata_asx_pass1(interface, port,
						       sys_info_ptr->
						       cpu_clock_hz);
		else {
			/*
			 * Configure more flexible RGMII preamble
			 * checking. Pass 1 doesn't support this
			 * feature.
			 */
			union cvmx_gmxx_rxx_frm_ctl frm_ctl;
			frm_ctl.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
					  (port, interface));
			/* New field, so must be compile time */
			frm_ctl.s.pre_free = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
				       frm_ctl.u64);
		}

		/*
		 * Each pause frame transmitted will ask for about 10M
		 * bit times before resume.  If buffer space becomes
		 * available before that time has expired, an XON
		 * pause frame (0 time) will be transmitted to restart
		 * the flow.
		 */
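		/*
		 * Hedged arithmetic, assuming these registers count standard
		 * 802.3 pause quanta of 512 bit times each: 20000 quanta is
		 * roughly 10.24M bit times, and re-sending every 19000
		 * quanta keeps the pause from expiring while backpressure
		 * persists.
		 */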
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
			       20000);
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
			       (port, interface), 19000);

		if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       16);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       16);
		} else {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       24);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       24);
		}
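		/*
		 * Hedged note: the ASX TX/RX CLK_SET values are assumed to
		 * program the RGMII clock delay compensation; CN50XX gets a
		 * different setting (16) than the other RGMII-capable parts
		 * (24), and the delay per step is chip specific.
		 */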
	}

	__cvmx_helper_setup_gmx(interface, num_ports);

	/* enable the ports now */
	for (port = 0; port < num_ports; port++) {
		union cvmx_gmxx_prtx_cfg gmx_cfg;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
		gmx_cfg.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
			       gmx_cfg.u64);
	}
	__cvmx_interrupt_asxx_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}

/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state
 */
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_asxx_prt_loop asxx_prt_loop;

	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (asxx_prt_loop.s.int_loop & (1 << index)) {
		/* Force 1Gbps full duplex on internal loopback */
		cvmx_helper_link_info_t result;
		result.u64 = 0;
		result.s.full_duplex = 1;
		result.s.link_up = 1;
		result.s.speed = 1000;
		return result;
	} else
		return __cvmx_helper_board_link_get(ipd_port);
}
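
/*
 * Typical (hypothetical) caller pattern: poll the link periodically and
 * push any change back into the hardware, e.g.
 *
 *	cvmx_helper_link_info_t li = __cvmx_helper_rgmii_link_get(ipd_port);
 *	if (li.u64 != last_link_info.u64)
 *		__cvmx_helper_rgmii_link_set(ipd_port, li);
 *
 * where last_link_info is whatever state the caller recorded on the
 * previous poll. This matches the requirement below that the state passed
 * to the link_set helper be the state link_get last returned.
 */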

/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get().
 *
 * @ipd_port:  IPD/PKO port to configure
 * @link_info: The new link state
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_rgmii_link_set(int ipd_port,
				 cvmx_helper_link_info_t link_info)
{
	int result = 0;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
	int i;

	/* Ignore speed sets in the simulator */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return 0;

	/* Read the current settings so we know the current enable state */
	original_gmx_cfg.u64 =
	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	new_gmx_cfg = original_gmx_cfg;

	/* Disable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
				     ~(1 << index));

	memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
	/* Disable all queues so that TX should become idle */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
		pko_mem_queue_qos.s.pid = ipd_port;
		pko_mem_queue_qos.s.qid = queue;
		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
		pko_mem_queue_qos.s.qos_mask = 0;
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
	}

	/* Disable backpressure */
	gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
	gmx_tx_ovr_bp.s.en |= 1 << index;
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
	cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
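	/* Read back so the backpressure change posts before we start polling. */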

	/*
	 * Poll the GMX state machine, waiting for it to become idle.
	 * Preferably we should only change the speed while it is idle.
	 * If it doesn't become idle we will still do the speed change,
	 * but there is a slight chance that GMX will lock up.
	 */
	cvmx_write_csr(CVMX_NPI_DBG_SELECT,
		       interface * 0x800 + index * 0x100 + 0x880);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
			==, 0, 10000);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
			==, 0, 10000);

	/* Disable the port before we make any changes */
	new_gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Set full/half duplex */
	if (cvmx_octeon_is_pass1())
		/* Half duplex is broken for 38XX Pass 1 */
		new_gmx_cfg.s.duplex = 1;
	else if (!link_info.s.link_up)
		/* Force full duplex on down links */
		new_gmx_cfg.s.duplex = 1;
	else
		new_gmx_cfg.s.duplex = link_info.s.full_duplex;

	/* Set the link speed. Anything unknown is set to 1Gbps */
	if (link_info.s.speed == 10) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else if (link_info.s.speed == 100) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else {
		new_gmx_cfg.s.slottime = 1;
		new_gmx_cfg.s.speed = 1;
	}

	/* Adjust the clocks */
	if (link_info.s.speed == 10) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else if (link_info.s.speed == 100) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
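	/*
	 * The values above follow classic Ethernet timing: TXX_SLOT is 0x40
	 * (64 bytes, the 10/100 slot time) versus 0x200 (512 bytes, the
	 * gigabit slot time), and TXX_BURST is only non-zero at gigabit,
	 * where burst mode applies. The TXX_CLK divider shrinks as the link
	 * speed rises (50/5/1 for 10/100/1000 Mbps).
	 */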

	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
			union cvmx_gmxx_inf_mode mode;
			mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	/*
	 * Port	 .en  .type  .p0mii  Configuration
	 * ----	 ---  -----  ------  -----------------------------------------
	 *  X	   0	 X	X    All links are disabled.
	 *  0	   1	 X	0    Port 0 is RGMII
	 *  0	   1	 X	1    Port 0 is MII
	 *  1	   1	 0	X    Ports 1 and 2 are configured as RGMII ports.
	 *  1	   1	 1	X    Port 1: GMII/MII; Port 2: disabled. GMII or
	 *			     MII port is selected by GMX_PRT1_CFG[SPEED].
	 */

			/* In MII mode, CLK_CNT = 1. */
			if (((index == 0) && (mode.s.p0mii == 1))
			    || ((index != 0) && (mode.s.type == 1))) {
				cvmx_write_csr(CVMX_GMXX_TXX_CLK
					       (index, interface), 1);
			}
		}
	}

	/* Do a read to make sure all setup stuff is complete */
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Save the new GMX setting without enabling the port */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	/* Enable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
									index));

	/* Re-enable the TX path */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
			       pko_mem_queue_qos_save[i].u64);
	}

	/* Restore backpressure */
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);

	/* Restore the GMX enable state. Port config is complete */
	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	return result;
}

/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent back out.
 *
 * @ipd_port: IPD/PKO port to loopback.
 * @enable_internal:
 *		   Non-zero if you want internal loopback
 * @enable_external:
 *		   Non-zero if you want external loopback
 *
 * Returns Zero on success, negative on failure.
 */
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
					   int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	int original_enable;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_asxx_prt_loop asxx_prt_loop;

	/* Read the current enable state and save it */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	original_enable = gmx_cfg.s.en;
	/* Force port to be disabled */
	gmx_cfg.s.en = 0;
	if (enable_internal) {
		/* Force speed if we're doing internal loopback */
		gmx_cfg.s.duplex = 1;
		gmx_cfg.s.slottime = 1;
		gmx_cfg.s.speed = 1;
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	/* Set the loopback bits */
	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (enable_internal)
		asxx_prt_loop.s.int_loop |= 1 << index;
	else
		asxx_prt_loop.s.int_loop &= ~(1 << index);
	if (enable_external)
		asxx_prt_loop.s.ext_loop |= 1 << index;
	else
		asxx_prt_loop.s.ext_loop &= ~(1 << index);
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);

	/* Force enables in internal loopback */
	if (enable_internal) {
		uint64_t tmp;
		tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
			       (1 << index) | tmp);
		tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
			       (1 << index) | tmp);
		original_enable = 1;
	}

	/* Restore the enable state */
	gmx_cfg.s.en = original_enable;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	return 0;
}
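
/*
 * Example (hypothetical) usage: place a port in internal loopback only,
 * then later return it to normal operation:
 *
 *	__cvmx_helper_rgmii_configure_loopback(ipd_port, 1, 0);
 *	...
 *	__cvmx_helper_rgmii_configure_loopback(ipd_port, 0, 0);
 */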
523