1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Copyright (c) 2002-2006 Neterion, Inc.
22 */
23
24 /*
25 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
27 */
28
29 #include "xgehal-device.h"
30 #include "xgehal-channel.h"
31 #include "xgehal-fifo.h"
32 #include "xgehal-ring.h"
33 #include "xgehal-driver.h"
34 #include "xgehal-mgmt.h"
35
36 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
37 #define END_SIGN 0x0
38
39 #ifdef XGE_HAL_HERC_EMULATION
40 #undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
41 #endif
42
43 /*
44 * Jenkins hash key length (in bytes)
45 */
46 #define XGE_HAL_JHASH_MSG_LEN 50
47
48 /*
49 * mix(a,b,c) used in Jenkins hash algorithm
50 */
51 #define mix(a,b,c) { \
52 a -= b; a -= c; a ^= (c>>13); \
53 b -= c; b -= a; b ^= (a<<8); \
54 c -= a; c -= b; c ^= (b>>13); \
55 a -= b; a -= c; a ^= (c>>12); \
56 b -= c; b -= a; b ^= (a<<16); \
57 c -= a; c -= b; c ^= (b>>5); \
58 a -= b; a -= c; a ^= (c>>3); \
59 b -= c; b -= a; b ^= (a<<10); \
60 c -= a; c -= b; c ^= (b>>15); \
61 }
62
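/*
 * Illustrative sketch (an assumption, not compiled into the HAL): the
 * classic Jenkins lookup2-style hash that mix() and XGE_HAL_JHASH_MSG_LEN
 * belong to.  The HAL's own RTH hash routine may differ in details; this
 * only shows the standard shape of the algorithm around the macro:
 *
 *	static u32
 *	example_jhash(const u8 *key, u32 length, u32 initval)
 *	{
 *		u32 a = 0x9e3779b9, b = 0x9e3779b9, c = initval;
 *		u32 len = length;
 *
 *		while (len >= 12) {
 *			a += key[0] + ((u32)key[1] << 8) +
 *			    ((u32)key[2] << 16) + ((u32)key[3] << 24);
 *			b += key[4] + ((u32)key[5] << 8) +
 *			    ((u32)key[6] << 16) + ((u32)key[7] << 24);
 *			c += key[8] + ((u32)key[9] << 8) +
 *			    ((u32)key[10] << 16) + ((u32)key[11] << 24);
 *			mix(a, b, c);
 *			key += 12; len -= 12;
 *		}
 *		c += length;
 *		(up to 11 trailing key bytes are folded into a/b/c here)
 *		mix(a, b, c);
 *		return c;
 *	}
 */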
63
64 /*
65 * __hal_device_event_queued
66 * @data: pointer to xge_hal_device_t structure
67 *
68 * Called when a new event has been successfully queued.
69 */
70 void
71 __hal_device_event_queued(void *data, int event_type)
72 {
73 xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
74 if (g_xge_hal_driver->uld_callbacks.event_queued) {
75 g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
76 }
77 }
78
79 /*
80 * __hal_pio_mem_write32_upper
81 *
82 * Endian-aware implementation of xge_os_pio_mem_write32().
83 * Since Xframe has 64-bit registers, we differentiate the upper and
84 * lower 32-bit parts.
85 */
86 void
87 __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
88 {
89 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
90 xge_os_pio_mem_write32(pdev, regh, val, addr);
91 #else
92 xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
93 #endif
94 }
95
96 /*
97 * __hal_pio_mem_write32_lower
98 *
99 * Endian-aware implementation of xge_os_pio_mem_write32().
100 * Since Xframe has 64-bit registers, we differentiate the upper and
101 * lower 32-bit parts.
102 */
103 void
104 __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
105 void *addr)
106 {
107 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
108 xge_os_pio_mem_write32(pdev, regh, val,
109 (void *) ((char *)addr + 4));
110 #else
111 xge_os_pio_mem_write32(pdev, regh, val, addr);
112 #endif
113 }
114
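/*
 * Usage sketch: a full 64-bit register value is written as two 32-bit
 * halves, upper half first, with a write barrier in between.  This is
 * exactly the pattern __hal_serial_mem_write64() below follows:
 *
 *	__hal_pio_mem_write32_upper(pdev, regh, (u32)(val64 >> 32), reg);
 *	xge_os_wmb();
 *	__hal_pio_mem_write32_lower(pdev, regh, (u32)val64, reg);
 */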
115 /*
116 * __hal_device_register_poll
117 * @hldev: pointer to xge_hal_device_t structure
118 * @reg: register to poll for
119 * @op: 0 - bit reset, 1 - bit set
120 * @mask: mask for logical "and" condition based on %op
121 * @max_millis: maximum time to try to poll in milliseconds
122 *
123 * Polls the given register for up to the specified amount of time,
124 * until the masked bit(s) are cleared (@op == 0) or set (@op == 1).
125 */
126 xge_hal_status_e
127 __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
128 int op, u64 mask, int max_millis)
129 {
130 u64 val64;
131 int i = 0;
132 xge_hal_status_e ret = XGE_HAL_FAIL;
133
134 xge_os_udelay(10);
135
136 do {
137 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
138 if (op == 0 && !(val64 & mask))
139 return XGE_HAL_OK;
140 else if (op == 1 && (val64 & mask) == mask)
141 return XGE_HAL_OK;
142 xge_os_udelay(100);
143 } while (++i <= 9);
144
145 do {
146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
147 if (op == 0 && !(val64 & mask))
148 return XGE_HAL_OK;
149 else if (op == 1 && (val64 & mask) == mask)
150 return XGE_HAL_OK;
151 xge_os_udelay(1000);
152 } while (++i < max_millis);
153
154 return ret;
155 }
156
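/*
 * Usage sketch: callers either poll for a strobe bit to clear (op == 0),
 * e.g. waiting for a command-memory write to complete:
 *
 *	status = __hal_device_register_poll(hldev, &bar0->tti_command_mem, 0,
 *	        XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
 *	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
 *
 * or for a set of status bits to become set (op == 1), as in
 * __hal_device_wait_quiescent() below.
 */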
157 /*
158 * __hal_device_wait_quiescent
159 * @hldev: the device
160 * @hw_status: hw_status in case of error
161 *
162 * Wait until the relevant device blocks report a quiescent state.
163 */
164 static xge_hal_status_e
165 __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
166 {
167 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
168
169 /* poll and wait first */
170 #ifdef XGE_HAL_HERC_EMULATION
171 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
172 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
173 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
174 XGE_HAL_ADAPTER_STATUS_PFC_READY |
175 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
176 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
177 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
178 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
179 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
180 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
181 #else
182 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
183 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
184 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
185 XGE_HAL_ADAPTER_STATUS_PFC_READY |
186 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
187 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
188 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
189 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
190 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
191 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
192 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
193 #endif
194
195 return xge_hal_device_status(hldev, hw_status);
196 }
197
198 /**
199 * xge_hal_device_is_slot_freeze
200 * @devh: the device
201 *
202 * Returns non-zero if the slot is frozen.
203 * The determination is made based on the adapter_status
204 * register, which will never read as all FFs unless the PCI read
205 * cannot go through.
206 */
207 int
208 xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
209 {
210 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
211 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
212 u16 device_id;
213 u64 adapter_status =
214 xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
215 &bar0->adapter_status);
216 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
217 xge_offsetof(xge_hal_pci_config_le_t, device_id),
218 &device_id);
219 #ifdef TX_DEBUG
220 if (adapter_status == XGE_HAL_ALL_FOXES)
221 {
222 u64 dummy;
223 dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
224 &bar0->pcc_enable);
225 printf(">>> Slot is frozen!\n");
226 brkpoint(0);
227 }
228 #endif
229 return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
230 }
231
232
233 /*
234 * __hal_device_led_actifity_fix
235 * @hldev: pointer to xge_hal_device_t structure
236 *
237 * SXE-002: Configure link and activity LED to turn it off
238 */
239 static void
240 __hal_device_led_actifity_fix(xge_hal_device_t *hldev)
241 {
242 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
243 u16 subid;
244 u64 val64;
245
246 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
247 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
248
249 /*
250 * In the case of Herc, a new register named beacon control was
251 * added which is not present in Xena.
252 * The beacon control register in Herc is at the same offset as
253 * the gpio control register in Xena, so the two are one and the same
254 * in the case of Xena. Also, the gpio control register offset differs
255 * between Herc and Xena.
256 * The current register map represents Herc (i.e., we have
257 * both beacon and gpio control registers in register map).
258 * WRT transition from Xena to Herc, all the code in Xena which was
259 * using gpio control register for LED handling would have to
260 * use beacon control register in Herc and the rest of the code
261 * which uses gpio control in Xena would use the same register
262 * in Herc.
263 * WRT LED handling(following code), In the case of Herc, beacon
264 * control register has to be used. This is applicable for Xena also,
265 * since it represents the gpio control register in Xena.
266 */
267 if ((subid & 0xFF) >= 0x07) {
268 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
269 &bar0->beacon_control);
270 val64 |= 0x0000800000000000ULL;
271 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
272 val64, &bar0->beacon_control);
273 val64 = 0x0411040400000000ULL;
274 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
275 (void *) ((u8 *)bar0 + 0x2700));
276 }
277 }
278
279 /* Constants for fixing the MAC address problem seen mostly on
280 * Alpha machines.
281 */
282 static u64 xena_fix_mac[] = {
283 0x0060000000000000ULL, 0x0060600000000000ULL,
284 0x0040600000000000ULL, 0x0000600000000000ULL,
285 0x0020600000000000ULL, 0x0060600000000000ULL,
286 0x0020600000000000ULL, 0x0060600000000000ULL,
287 0x0020600000000000ULL, 0x0060600000000000ULL,
288 0x0020600000000000ULL, 0x0060600000000000ULL,
289 0x0020600000000000ULL, 0x0060600000000000ULL,
290 0x0020600000000000ULL, 0x0060600000000000ULL,
291 0x0020600000000000ULL, 0x0060600000000000ULL,
292 0x0020600000000000ULL, 0x0060600000000000ULL,
293 0x0020600000000000ULL, 0x0060600000000000ULL,
294 0x0020600000000000ULL, 0x0060600000000000ULL,
295 0x0020600000000000ULL, 0x0000600000000000ULL,
296 0x0040600000000000ULL, 0x0060600000000000ULL,
297 END_SIGN
298 };
299
300 /*
301 * __hal_device_xena_fix_mac
302 * @hldev: HAL device handle.
303 *
304 * Fix for all "FFs" MAC address problems observed on Alpha platforms.
305 */
306 static void
307 __hal_device_xena_fix_mac(xge_hal_device_t *hldev)
308 {
309 int i = 0;
310 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
311
312 /*
313 * In the case of Herc, a new register named beacon control was
314 * added which is not present in Xena.
315 * The beacon control register in Herc is at the same offset as
316 * the gpio control register in Xena, so the two are one and the same
317 * in the case of Xena. Also, the gpio control register offset differs
318 * between Herc and Xena.
319 * The current register map represents Herc (i.e., we have
320 * both beacon and gpio control registers in register map).
321 * WRT transition from Xena to Herc, all the code in Xena which was
322 * using gpio control register for LED handling would have to
323 * use beacon control register in Herc and the rest of the code
324 * which uses gpio control in Xena would use the same register
325 * in Herc.
326 * In the following code(xena_fix_mac), beacon control register has
327 * to be used in the case of Xena, since it represents gpio control
328 * register. In the case of Herc, there is no change required.
329 */
330 while (xena_fix_mac[i] != END_SIGN) {
331 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
332 xena_fix_mac[i++], &bar0->beacon_control);
333 xge_os_mdelay(1);
334 }
335 }
336
337 /*
338 * xge_hal_device_bcast_enable
339 * @hldev: HAL device handle.
340 *
341 * Enable receiving broadcasts.
342 * The host must first write the RMAC_CFG_KEY "key"
343 * register, and then the MAC_CFG register.
344 */
345 void
346 xge_hal_device_bcast_enable(xge_hal_device_h devh)
347 {
348 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
349 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
350 u64 val64;
351
352 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
353 &bar0->mac_cfg);
354 val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;
355
356 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
357 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
358
359 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
360 (u32)(val64 >> 32), &bar0->mac_cfg);
361
362 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
363 (unsigned long long)val64,
364 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
365 }
366
367 /*
368 * xge_hal_device_bcast_disable
369 * @hldev: HAL device handle.
370 *
371 * Disable receiving broadcasts.
372 * The host must first write the RMAC_CFG_KEY "key"
373 * register, and then the MAC_CFG register.
374 */
375 void
376 xge_hal_device_bcast_disable(xge_hal_device_h devh)
377 {
378 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
379 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
380 u64 val64;
381
382 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
383 &bar0->mac_cfg);
384
385 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
386 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
387 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
388
389 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
390 (u32)(val64 >> 32), &bar0->mac_cfg);
391
392 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
393 (unsigned long long)val64,
394 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
395 }
396
397 /*
398 * __hal_device_shared_splits_configure
399 * @hldev: HAL device handle.
400 *
401 * TxDMA will stop issuing read requests if the number of outstanding
402 * read splits exceeds the limit set by shared_splits.
403 */
404 static void
405 __hal_device_shared_splits_configure(xge_hal_device_t *hldev)
406 {
407 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
408 u64 val64;
409
410 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
411 &bar0->pic_control);
412 val64 |=
413 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
414 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
415 &bar0->pic_control);
416 xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
417 }
418
419 /*
420 * __hal_device_rmac_padding_configure
421 * @hldev: HAL device handle.
422 *
423 * Configure RMAC frame padding. Depending on the configuration, the
424 * padding can be sent to the host or stripped by the MAC.
425 */
426 static void
427 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
428 {
429 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
430 u64 val64;
431
432 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
433 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
434 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
435 &bar0->mac_cfg);
436 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
437 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
438 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;
439
440 /*
441 * If the RTH enable bit is not set, strip the FCS
442 */
443 if (!hldev->config.rth_en ||
444 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
445 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
446 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
447 }
448
449 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
450 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;
451
452 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
453 (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
454 xge_os_mdelay(1);
455
456 xge_debug_device(XGE_TRACE,
457 "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
458 (unsigned long long)val64);
459 }
460
461 /*
462 * __hal_device_pause_frames_configure
463 * @hldev: HAL device handle.
464 *
465 * Set Pause threshold.
466 *
467 * A pause frame is generated if the amount of data outstanding
468 * on any queue exceeds the ratio of
469 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256.
470 */
471 static void
472 __hal_device_pause_frames_configure(xge_hal_device_t *hldev)
473 {
474 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
475 int i;
476 u64 val64;
477
478 switch (hldev->config.mac.media) {
479 case XGE_HAL_MEDIA_SR:
480 case XGE_HAL_MEDIA_SW:
481 val64=0xfffbfffbfffbfffbULL;
482 break;
483 case XGE_HAL_MEDIA_LR:
484 case XGE_HAL_MEDIA_LW:
485 val64=0xffbbffbbffbbffbbULL;
486 break;
487 case XGE_HAL_MEDIA_ER:
488 case XGE_HAL_MEDIA_EW:
489 default:
490 val64=0xffbbffbbffbbffbbULL;
491 break;
492 }
493
494 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
495 val64, &bar0->mc_pause_thresh_q0q3);
496 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
497 val64, &bar0->mc_pause_thresh_q4q7);
498
499 /* Set the time value to be inserted in the pause frame generated
500 * by Xframe */
501 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
502 &bar0->rmac_pause_cfg);
503 if (hldev->config.mac.rmac_pause_gen_en)
504 val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
505 else
506 val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
507 if (hldev->config.mac.rmac_pause_rcv_en)
508 val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
509 else
510 val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
511 val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
512 val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
513 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
514 &bar0->rmac_pause_cfg);
515
516 val64 = 0;
517 for (i = 0; i<4; i++) {
518 val64 |=
519 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
520 <<(i*2*8));
521 }
522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
523 &bar0->mc_pause_thresh_q0q3);
524
525 val64 = 0;
526 for (i = 0; i<4; i++) {
527 val64 |=
528 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
529 <<(i*2*8));
530 }
531 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
532 &bar0->mc_pause_thresh_q4q7);
533 xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
534 }
535
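/*
 * Worked example of the threshold layout programmed above: with
 * mc_pause_threshold_q0q3 == 0xBB, each of the four 16-bit per-queue
 * fields becomes (0xFF00 | 0xBB) == 0xFFBB, so mc_pause_thresh_q0q3 is
 * written with 0xFFBBFFBBFFBBFFBB, the same value used earlier as the
 * media-based default for LR/LW/ER/EW media.
 */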
536 /*
537 * Herc's clock rate is doubled, unless the slot is 33MHz.
538 */
539 unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
540 unsigned int time_ival)
541 {
542 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
543 return time_ival;
544
545 xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);
546
547 if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
548 hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
549 time_ival *= 2;
550
551 return time_ival;
552 }
553
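/*
 * Worked example (illustrative values): with timer_val_us == 64 on a
 * 133 MHz bus, __hal_device_tti_apply() below computes
 *
 *	tx_interval = 133 * 64 / 64 = 133
 *
 * and this helper then doubles it to 266 on a Herc adapter in a slot
 * faster than 33MHz; on Xena the value is returned unchanged.
 */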
554
555 /*
556 * __hal_device_bus_master_disable
557 * @hldev: HAL device handle.
558 *
559 * Disable bus mastership.
560 */
561 static void
562 __hal_device_bus_master_disable(xge_hal_device_t *hldev)
563 {
564 u16 cmd;
565 u16 bus_master = 4;
566
567 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
568 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
569 cmd &= ~bus_master;
570 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
571 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
572 }
573
574 /*
575 * __hal_device_bus_master_enable
576 * @hldev: HAL device handle.
577 *
578 * Enable bus mastership.
579 */
580 static void
581 __hal_device_bus_master_enable(xge_hal_device_t *hldev)
582 {
583 u16 cmd;
584 u16 bus_master = 4;
585
586 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
587 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
588
589 /* already enabled? do nothing */
590 if (cmd & bus_master)
591 return;
592
593 cmd |= bus_master;
594 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
595 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
596 }
597 /*
598 * __hal_device_intr_mgmt
599 * @hldev: HAL device handle.
600 * @mask: mask indicating which Intr block must be modified.
601 * @flag: if true - enable, otherwise - disable interrupts.
602 *
603 * Disable or enable device interrupts. Mask is used to specify
604 * which hardware blocks should produce interrupts. For details
605 * please refer to Xframe User Guide.
606 */
607 static void
608 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
609 {
610 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
611 u64 val64 = 0, temp64 = 0;
612 u64 gim, gim_saved;
613
614 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
615 hldev->regh0, &bar0->general_int_mask);
616
617 /* Top level interrupt classification */
618 /* PIC Interrupts */
619 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
620 /* Enable PIC Intrs in the general intr mask register */
621 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
622 if (flag) {
623 gim &= ~((u64) val64);
624 temp64 = xge_os_pio_mem_read64(hldev->pdev,
625 hldev->regh0, &bar0->pic_int_mask);
626
627 temp64 &= ~XGE_HAL_PIC_INT_TX;
628 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
629 if (xge_hal_device_check_id(hldev) ==
630 XGE_HAL_CARD_HERC) {
631 temp64 &= ~XGE_HAL_PIC_INT_MISC;
632 }
633 #endif
634 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
635 temp64, &bar0->pic_int_mask);
636 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
637 if (xge_hal_device_check_id(hldev) ==
638 XGE_HAL_CARD_HERC) {
639 /*
640 * Unmask only Link Up interrupt
641 */
642 temp64 = xge_os_pio_mem_read64(hldev->pdev,
643 hldev->regh0, &bar0->misc_int_mask);
644 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
645 xge_os_pio_mem_write64(hldev->pdev,
646 hldev->regh0, temp64,
647 &bar0->misc_int_mask);
648 xge_debug_device(XGE_TRACE,
649 "unmask link up flag "XGE_OS_LLXFMT,
650 (unsigned long long)temp64);
651 }
652 #endif
653 } else { /* flag == 0 */
654
655 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
656 if (xge_hal_device_check_id(hldev) ==
657 XGE_HAL_CARD_HERC) {
658 /*
659 * Mask both Link Up and Down interrupts
660 */
661 temp64 = xge_os_pio_mem_read64(hldev->pdev,
662 hldev->regh0, &bar0->misc_int_mask);
663 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
664 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
665 xge_os_pio_mem_write64(hldev->pdev,
666 hldev->regh0, temp64,
667 &bar0->misc_int_mask);
668 xge_debug_device(XGE_TRACE,
669 "mask link up/down flag "XGE_OS_LLXFMT,
670 (unsigned long long)temp64);
671 }
672 #endif
673 /* Disable PIC Intrs in the general intr mask
674 * register */
675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
676 XGE_HAL_ALL_INTRS_DIS,
677 &bar0->pic_int_mask);
678 gim |= val64;
679 }
680 }
681
682 /* DMA Interrupts */
683 /* Enabling/Disabling Tx DMA interrupts */
684 if (mask & XGE_HAL_TX_DMA_INTR) {
685 /* Enable TxDMA Intrs in the general intr mask register */
686 val64 = XGE_HAL_TXDMA_INT_M;
687 if (flag) {
688 gim &= ~((u64) val64);
689 /* Enable all TxDMA interrupts */
690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
691 0x0, &bar0->txdma_int_mask);
692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
693 0x0, &bar0->pfc_err_mask);
694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
695 0x0, &bar0->tda_err_mask);
696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
697 0x0, &bar0->pcc_err_mask);
698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
699 0x0, &bar0->tti_err_mask);
700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
701 0x0, &bar0->lso_err_mask);
702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
703 0x0, &bar0->tpa_err_mask);
704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
705 0x0, &bar0->sm_err_mask);
706
707 } else { /* flag == 0 */
708
709 /* Disable TxDMA Intrs in the general intr mask
710 * register */
711 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
712 XGE_HAL_ALL_INTRS_DIS,
713 &bar0->txdma_int_mask);
714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
715 XGE_HAL_ALL_INTRS_DIS,
716 &bar0->pfc_err_mask);
717
718 gim |= val64;
719 }
720 }
721
722 /* Enabling/Disabling Rx DMA interrupts */
723 if (mask & XGE_HAL_RX_DMA_INTR) {
724 /* Enable RxDMA Intrs in the general intr mask register */
725 val64 = XGE_HAL_RXDMA_INT_M;
726 if (flag) {
727
728 gim &= ~((u64) val64);
729 /* All RxDMA block interrupts are disabled for now
730 * TODO */
731 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
732 XGE_HAL_ALL_INTRS_DIS,
733 &bar0->rxdma_int_mask);
734
735 } else { /* flag == 0 */
736
737 /* Disable RxDMA Intrs in the general intr mask
738 * register */
739 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
740 XGE_HAL_ALL_INTRS_DIS,
741 &bar0->rxdma_int_mask);
742
743 gim |= val64;
744 }
745 }
746
747 /* MAC Interrupts */
748 /* Enabling/Disabling MAC interrupts */
749 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
750 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
751 if (flag) {
752
753 gim &= ~((u64) val64);
754
755 /* All MAC block error inter. are disabled for now. */
756 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
757 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
759 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
760
761 } else { /* flag == 0 */
762
763 /* Disable MAC Intrs in the general intr mask
764 * register */
765 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
766 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
767 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
768 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
769
770 gim |= val64;
771 }
772 }
773
774 /* XGXS Interrupts */
775 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
776 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
777 if (flag) {
778
779 gim &= ~((u64) val64);
780 /* All XGXS block error interrupts are disabled for now
781 * TODO */
782 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
783 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
784
785 } else { /* flag == 0 */
786
787 /* Disable MC Intrs in the general intr mask register */
788 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
789 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
790
791 gim |= val64;
792 }
793 }
794
795 /* Memory Controller(MC) interrupts */
796 if (mask & XGE_HAL_MC_INTR) {
797 val64 = XGE_HAL_MC_INT_M;
798 if (flag) {
799
800 gim &= ~((u64) val64);
801
802 /* Enable all MC blocks error interrupts */
803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
804 0x0ULL, &bar0->mc_int_mask);
805
806 } else { /* flag == 0 */
807
808 /* Disable MC Intrs in the general intr mask
809 * register */
810 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
811 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
812
813 gim |= val64;
814 }
815 }
816
817
818 /* Tx traffic interrupts */
819 if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
820 val64 = XGE_HAL_TXTRAFFIC_INT_M;
821 if (flag) {
822
823 gim &= ~((u64) val64);
824
825 /* Enable all the Tx side interrupts */
826 /* '0' Enables all 64 TX interrupt levels. */
827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
828 &bar0->tx_traffic_mask);
829
830 } else { /* flag == 0 */
831
832 /* Disable Tx Traffic Intrs in the general intr mask
833 * register. */
834 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
835 XGE_HAL_ALL_INTRS_DIS,
836 &bar0->tx_traffic_mask);
837 gim |= val64;
838 }
839 }
840
841 /* Rx traffic interrupts */
842 if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
843 val64 = XGE_HAL_RXTRAFFIC_INT_M;
844 if (flag) {
845 gim &= ~((u64) val64);
846 /* '0' Enables all 8 RX interrupt levels. */
847 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
848 &bar0->rx_traffic_mask);
849
850 } else { /* flag == 0 */
851
852 /* Disable Rx Traffic Intrs in the general intr mask
853 * register.
854 */
855 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
856 XGE_HAL_ALL_INTRS_DIS,
857 &bar0->rx_traffic_mask);
858
859 gim |= val64;
860 }
861 }
862
863 /* Sched Timer interrupt */
864 if (mask & XGE_HAL_SCHED_INTR) {
865 if (flag) {
866 temp64 = xge_os_pio_mem_read64(hldev->pdev,
867 hldev->regh0, &bar0->txpic_int_mask);
868 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
869 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
870 temp64, &bar0->txpic_int_mask);
871
872 xge_hal_device_sched_timer(hldev,
873 hldev->config.sched_timer_us,
874 hldev->config.sched_timer_one_shot);
875 } else {
876 temp64 = xge_os_pio_mem_read64(hldev->pdev,
877 hldev->regh0, &bar0->txpic_int_mask);
878 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;
879
880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
881 temp64, &bar0->txpic_int_mask);
882
883 xge_hal_device_sched_timer(hldev,
884 XGE_HAL_SCHED_TIMER_DISABLED,
885 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
886 }
887 }
888
889 if (gim != gim_saved) {
890 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
891 &bar0->general_int_mask);
892 xge_debug_device(XGE_TRACE, "general_int_mask updated "
893 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
894 (unsigned long long)gim_saved, (unsigned long long)gim);
895 }
896 }
897
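/*
 * Usage sketch (assumed caller, for illustration only): a ULD that only
 * wants per-channel traffic interrupts could enable them with
 *
 *	__hal_device_intr_mgmt(hldev,
 *	        XGE_HAL_TX_TRAFFIC_INTR | XGE_HAL_RX_TRAFFIC_INTR, 1);
 *
 * and mask the same blocks again by repeating the call with flag == 0.
 */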
898 /*
899 * __hal_device_bimodal_configure
900 * @hldev: HAL device handle.
901 *
902 * Bimodal parameters initialization.
903 */
904 static void
905 __hal_device_bimodal_configure(xge_hal_device_t *hldev)
906 {
907 int i;
908
909 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
910 xge_hal_tti_config_t *tti;
911 xge_hal_rti_config_t *rti;
912
913 if (!hldev->config.ring.queue[i].configured)
914 continue;
915 rti = &hldev->config.ring.queue[i].rti;
916 tti = &hldev->bimodal_tti[i];
917
918 tti->enabled = 1;
919 tti->urange_a = hldev->bimodal_urange_a_en * 10;
920 tti->urange_b = 20;
921 tti->urange_c = 30;
922 tti->ufc_a = hldev->bimodal_urange_a_en * 8;
923 tti->ufc_b = 16;
924 tti->ufc_c = 32;
925 tti->ufc_d = 64;
926 tti->timer_val_us = hldev->bimodal_timer_val_us;
927 tti->timer_ac_en = 1;
928 tti->timer_ci_en = 0;
929
930 rti->urange_a = 10;
931 rti->urange_b = 20;
932 rti->urange_c = 30;
933 rti->ufc_a = 1; /* <= for netpipe type of tests */
934 rti->ufc_b = 4;
935 rti->ufc_c = 4;
936 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */
937 rti->timer_ac_en = 1;
938 rti->timer_val_us = 5; /* for optimal bus efficiency usage */
939 }
940 }
941
942 /*
943 * __hal_device_tti_apply
944 * @hldev: HAL device handle.
945 *
946 * Apply TTI configuration.
947 */
948 static xge_hal_status_e
949 __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
950 int num, int runtime)
951 {
952 u64 val64, data1 = 0, data2 = 0;
953 xge_hal_pci_bar0_t *bar0;
954
955 if (runtime)
956 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
957 else
958 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
959
960 if (tti->timer_val_us) {
961 unsigned int tx_interval;
962
963 if (hldev->config.pci_freq_mherz) {
964 tx_interval = hldev->config.pci_freq_mherz *
965 tti->timer_val_us / 64;
966 tx_interval =
967 __hal_fix_time_ival_herc(hldev,
968 tx_interval);
969 } else {
970 tx_interval = tti->timer_val_us;
971 }
972 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
973 if (tti->timer_ac_en) {
974 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
975 }
976 if (tti->timer_ci_en) {
977 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
978 }
979
980 if (!runtime) {
981 xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
982 num, tx_interval, tti->timer_ci_en ?
983 "enabled": "disabled");
984 }
985 }
986
987 if (tti->urange_a ||
988 tti->urange_b ||
989 tti->urange_c ||
990 tti->ufc_a ||
991 tti->ufc_b ||
992 tti->ufc_c ||
993 tti->ufc_d ) {
994 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
995 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
996 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
997
998 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
999 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
1000 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
1001 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
1002 }
1003
1004 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1005 data1, &bar0->tti_data1_mem);
1006 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1007 &bar0->tti_data1_mem);
1008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1009 data2, &bar0->tti_data2_mem);
1010 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1011 &bar0->tti_data2_mem);
1012 xge_os_wmb();
1013
1014 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
1015 XGE_HAL_TTI_CMD_MEM_OFFSET(num);
1016 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1017 &bar0->tti_command_mem);
1018
1019 if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
1020 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
1021 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1022 /* upper layer may require to repeat */
1023 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1024 }
1025
1026 if (!runtime) {
1027 xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
1028 XGE_OS_LLXFMT, num,
1029 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1030 hldev->regh0, &bar0->tti_data1_mem));
1031 }
1032
1033 return XGE_HAL_OK;
1034 }
1035
1036 /*
1037 * __hal_device_tti_configure
1038 * @hldev: HAL device handle.
1039 *
1040 * TTI Initialization.
1041 * Initialize Transmit Traffic Interrupt Scheme.
1042 */
1043 static xge_hal_status_e
1044 __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
1045 {
1046 int i;
1047
1048 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
1049 int j;
1050
1051 if (!hldev->config.fifo.queue[i].configured)
1052 continue;
1053
1054 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
1055 xge_hal_status_e status;
1056
1057 if (!hldev->config.fifo.queue[i].tti[j].enabled)
1058 continue;
1059
1060 /* at least some TTI enabled. Record it. */
1061 hldev->tti_enabled = 1;
1062
1063 status = __hal_device_tti_apply(hldev,
1064 &hldev->config.fifo.queue[i].tti[j],
1065 i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
1066 if (status != XGE_HAL_OK)
1067 return status;
1068 }
1069 }
1070
1071 /* processing bimodal TTIs */
1072 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1073 xge_hal_status_e status;
1074
1075 if (!hldev->bimodal_tti[i].enabled)
1076 continue;
1077
1078 /* at least some bimodal TTI enabled. Record it. */
1079 hldev->tti_enabled = 1;
1080
1081 status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
1082 XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
1083 if (status != XGE_HAL_OK)
1084 return status;
1085
1086 }
1087
1088 return XGE_HAL_OK;
1089 }
1090
1091 /*
1092 * __hal_device_rti_configure
1093 * @hldev: HAL device handle.
1094 *
1095 * RTI Initialization.
1096 * Initialize Receive Traffic Interrupt Scheme.
1097 */
1098 xge_hal_status_e
1099 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
1100 {
1101 xge_hal_pci_bar0_t *bar0;
1102 u64 val64, data1 = 0, data2 = 0;
1103 int i;
1104
1105 if (runtime) {
1106 /*
1107 * we don't want to re-configure RTI in case when
1108 * bimodal interrupts are in use. Instead reconfigure TTI
1109 * with new RTI values.
1110 */
1111 if (hldev->config.bimodal_interrupts) {
1112 __hal_device_bimodal_configure(hldev);
1113 return __hal_device_tti_configure(hldev, 1);
1114 }
1115 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
1116 } else
1117 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1118
1119 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1120 xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;
1121
1122 if (!hldev->config.ring.queue[i].configured)
1123 continue;
1124
1125 if (rti->timer_val_us) {
1126 unsigned int rx_interval;
1127
1128 if (hldev->config.pci_freq_mherz) {
1129 rx_interval = hldev->config.pci_freq_mherz *
1130 rti->timer_val_us / 8;
1131 rx_interval =
1132 __hal_fix_time_ival_herc(hldev,
1133 rx_interval);
1134 } else {
1135 rx_interval = rti->timer_val_us;
1136 }
1137 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
1138 if (rti->timer_ac_en) {
1139 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
1140 }
1141 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
1142 }
1143
1144 if (rti->urange_a ||
1145 rti->urange_b ||
1146 rti->urange_c ||
1147 rti->ufc_a ||
1148 rti->ufc_b ||
1149 rti->ufc_c ||
1150 rti->ufc_d) {
1151 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
1152 XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
1153 XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);
1154
1155 data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
1156 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
1157 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
1158 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
1159 }
1160
1161 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1162 data1, &bar0->rti_data1_mem);
1163 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1164 &bar0->rti_data1_mem);
1165 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1166 data2, &bar0->rti_data2_mem);
1167 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1168 &bar0->rti_data2_mem);
1169
1170 xge_os_wmb();
1171
1172 val64 = XGE_HAL_RTI_CMD_MEM_WE |
1173 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
1174 val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
1175 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1176 &bar0->rti_command_mem);
1177
1178 if (!runtime && __hal_device_register_poll(hldev,
1179 &bar0->rti_command_mem, 0,
1180 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
1181 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1182 /* upper layer may require to repeat */
1183 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1184 }
1185
1186 if (!runtime) {
1187 xge_debug_device(XGE_TRACE,
1188 "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
1189 i,
1190 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1191 hldev->regh0, &bar0->rti_data1_mem));
1192 }
1193 }
1194
1195 return XGE_HAL_OK;
1196 }
1197
1198
1199 /* Constants to be programmed into the Xena's registers to configure
1200 * the XAUI. */
1201 static u64 default_xena_mdio_cfg[] = {
1202 /* Reset PMA PLL */
1203 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1204 0xC0010100008000E4ULL,
1205 /* Remove Reset from PMA PLL */
1206 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1207 0xC0010100000000E4ULL,
1208 END_SIGN
1209 };
1210
1211 static u64 default_herc_mdio_cfg[] = {
1212 END_SIGN
1213 };
1214
1215 static u64 default_xena_dtx_cfg[] = {
1216 0x8000051500000000ULL, 0x80000515000000E0ULL,
1217 0x80000515D93500E4ULL, 0x8001051500000000ULL,
1218 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
1219 0x8002051500000000ULL, 0x80020515000000E0ULL,
1220 0x80020515F21000E4ULL,
1221 /* Set PADLOOPBACKN */
1222 0x8002051500000000ULL, 0x80020515000000E0ULL,
1223 0x80020515B20000E4ULL, 0x8003051500000000ULL,
1224 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
1225 0x8004051500000000ULL, 0x80040515000000E0ULL,
1226 0x80040515B20000E4ULL, 0x8005051500000000ULL,
1227 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
1228 SWITCH_SIGN,
1229 /* Remove PADLOOPBACKN */
1230 0x8002051500000000ULL, 0x80020515000000E0ULL,
1231 0x80020515F20000E4ULL, 0x8003051500000000ULL,
1232 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
1233 0x8004051500000000ULL, 0x80040515000000E0ULL,
1234 0x80040515F20000E4ULL, 0x8005051500000000ULL,
1235 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
1236 END_SIGN
1237 };
1238
1239 /*
1240 static u64 default_herc_dtx_cfg[] = {
1241 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
1242 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
1243 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1244 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1245 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1246 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1247 END_SIGN
1248 };
1249 */
1250
1251 static u64 default_herc_dtx_cfg[] = {
1252 0x8000051536750000ULL, 0x80000515367500E0ULL,
1253 0x8000051536750004ULL, 0x80000515367500E4ULL,
1254
1255 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1256 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1257
1258 0x801205150D440000ULL, 0x801205150D4400E0ULL,
1259 0x801205150D440004ULL, 0x801205150D4400E4ULL,
1260
1261 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1262 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1263 END_SIGN
1264 };
1265
1266
1267 void
1268 __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
1269 {
1270 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
1271 (u32)(value>>32), reg);
1272 xge_os_wmb();
1273 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
1274 (u32)value, reg);
1275 xge_os_wmb();
1276 xge_os_mdelay(1);
1277 }
1278
1279 u64
1280 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
1281 {
1282 u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1283 reg);
1284 xge_os_mdelay(1);
1285 return val64;
1286 }
1287
1288 /*
1289 * __hal_device_xaui_configure
1290 * @hldev: HAL device handle.
1291 *
1292 * Configure XAUI Interface of Xena.
1293 *
1294 * To Configure the Xena's XAUI, one has to write a series
1295 * of 64 bit values into two registers in a particular
1296 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
1297 * which is placed in the arrays of configuration values
1298 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
1299 * to switch writing from one register to another. We continue
1300 * writing these values until we encounter the 'END_SIGN' macro.
1301 * For example, after making a series of 21 writes into
1302 * dtx_control register the 'SWITCH_SIGN' appears and hence we
1303 * start writing into mdio_control until we encounter END_SIGN.
1304 */
1305 static void
1306 __hal_device_xaui_configure(xge_hal_device_t *hldev)
1307 {
1308 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1309 int mdio_cnt = 0, dtx_cnt = 0;
1310 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;
1311
1312 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
1313 default_dtx_cfg = default_xena_dtx_cfg;
1314 default_mdio_cfg = default_xena_mdio_cfg;
1315 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
1316 default_dtx_cfg = default_herc_dtx_cfg;
1317 default_mdio_cfg = default_herc_mdio_cfg;
1318 } else {
1319 xge_assert(default_dtx_cfg);
1320 return;
1321 }
1322
1323 do {
1324 dtx_cfg:
1325 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
1326 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
1327 dtx_cnt++;
1328 goto mdio_cfg;
1329 }
1330 __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
1331 &bar0->dtx_control);
1332 dtx_cnt++;
1333 }
1334 mdio_cfg:
1335 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
1336 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
1337 mdio_cnt++;
1338 goto dtx_cfg;
1339 }
1340 __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
1341 &bar0->mdio_control);
1342 mdio_cnt++;
1343 }
1344 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
1345 (default_mdio_cfg[mdio_cnt] == END_SIGN)) );
1346
1347 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
1348 }
1349
1350 /*
1351 * __hal_device_mac_link_util_set
1352 * @hldev: HAL device handle.
1353 *
1354 * Set sampling rate to calculate link utilization.
1355 */
1356 static void
1357 __hal_device_mac_link_util_set(xge_hal_device_t *hldev)
1358 {
1359 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1360 u64 val64;
1361
1362 val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
1363 hldev->config.mac.tmac_util_period) |
1364 XGE_HAL_MAC_RX_LINK_UTIL_VAL(
1365 hldev->config.mac.rmac_util_period);
1366 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1367 &bar0->mac_link_util);
1368 xge_debug_device(XGE_TRACE, "%s",
1369 "bandwidth link utilization configured");
1370 }
1371
1372 /*
1373 * __hal_device_set_swapper
1374 * @hldev: HAL device handle.
1375 *
1376 * Set the Xframe's byte "swapper" in accordance with
1377 * endianness of the host.
1378 */
1379 xge_hal_status_e
1380 __hal_device_set_swapper(xge_hal_device_t *hldev)
1381 {
1382 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1383 u64 val64;
1384
1385 /*
1386 * from the 32-bit errata:
1387 *
1388 * The SWAPPER_CONTROL register determines how the adapter accesses
1389 * host memory as well as how it responds to read and write requests
1390 * from the host system. Writes to this register should be performed
1391 * carefully, since the byte swappers could reverse the order of bytes.
1392 * When configuring this register keep in mind that writes to the PIF
1393 * read and write swappers could reverse the order of the upper and
1394 * lower 32-bit words. This means that the driver may have to write
1395 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
1396 * configure the entire register. */
1397
1398 /*
1399 * The device is set to a big endian format by default, so a big
1400 * endian driver need not set anything.
1401 */
1402
1403 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER)
1404
1405 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1406 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1407
1408 val64 = XGE_HAL_CUSTOM_HW_SWAPPER;
1409
1410 xge_os_wmb();
1411 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1412 &bar0->swapper_ctrl);
1413
1414 xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
1415 (unsigned long long)val64);
1416
1417 #elif !defined(XGE_OS_HOST_BIG_ENDIAN)
1418
1419 /*
1420 * Initially we enable all bits to make it accessible by the driver,
1421 * then we selectively enable only those bits that we want to set.
1422 * i.e. force swapper to swap for the first time since second write
1423 * will overwrite with the final settings.
1424 *
1425 * Use only for little endian platforms.
1426 */
1427 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1428 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1429 xge_os_wmb();
1430 val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
1431 XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
1432 XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
1433 XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
1434 XGE_HAL_SWAPPER_CTRL_RTH_FE |
1435 XGE_HAL_SWAPPER_CTRL_RTH_SE |
1436 XGE_HAL_SWAPPER_CTRL_TXP_FE |
1437 XGE_HAL_SWAPPER_CTRL_TXP_SE |
1438 XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
1439 XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
1440 XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
1441 XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
1442 XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
1443 XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
1444 XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
1445 XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
1446 XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
1447 XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
1448 XGE_HAL_SWAPPER_CTRL_XMSI_FE |
1449 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);
1450
1451 /*
1452 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
1453 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
1454 } */
1455 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
1456 &bar0->swapper_ctrl);
1457 xge_os_wmb();
1458 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1459 &bar0->swapper_ctrl);
1460 xge_os_wmb();
1461 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1462 &bar0->swapper_ctrl);
1463 xge_debug_device(XGE_TRACE, "%s", "using little endian set");
1464 #endif
1465
1466 /* Verify that the endian settings are correct by reading a feedback
1467 * register. */
1468 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1469 &bar0->pif_rd_swapper_fb);
1470 if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
1471 xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
1472 (unsigned long long) val64);
1473 return XGE_HAL_ERR_SWAPPER_CTRL;
1474 }
1475
1476 xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");
1477
1478 return XGE_HAL_OK;
1479 }
1480
1481 /*
1482 * __hal_device_rts_mac_configure - Configure RTS steering based on
1483 * destination mac address.
1484 * @hldev: HAL device handle.
1485 *
1486 */
1487 xge_hal_status_e
1488 __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
1489 {
1490 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1491 u64 val64;
1492
1493 if (!hldev->config.rts_mac_en) {
1494 return XGE_HAL_OK;
1495 }
1496
1497 /*
1498 * Set the receive traffic steering mode from default(classic)
1499 * to enhanced.
1500 */
1501 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1502 &bar0->rts_ctrl);
1503 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1504 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1505 val64, &bar0->rts_ctrl);
1506 return XGE_HAL_OK;
1507 }
1508
1509 /*
1510 * __hal_device_rts_port_configure - Configure RTS steering based on
1511 * destination or source port number.
1512 * @hldev: HAL device handle.
1513 *
1514 */
1515 xge_hal_status_e
1516 __hal_device_rts_port_configure(xge_hal_device_t *hldev)
1517 {
1518 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1519 u64 val64;
1520 int rnum;
1521
1522 if (!hldev->config.rts_port_en) {
1523 return XGE_HAL_OK;
1524 }
1525
1526 /*
1527 * Set the receive traffic steering mode from default(classic)
1528 * to enhanced.
1529 */
1530 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1531 &bar0->rts_ctrl);
1532 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1533 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1534 val64, &bar0->rts_ctrl);
1535
1536 /*
1537 * Initiate port steering according to per-ring configuration
1538 */
1539 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1540 int pnum;
1541 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum];
1542
1543 if (!queue->configured || queue->rts_port_en)
1544 continue;
1545
1546 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) {
1547 xge_hal_rts_port_t *port = &queue->rts_ports[pnum];
1548
1549 /*
1550 * Skip and clear empty ports
1551 */
1552 if (!port->num) {
1553 /*
1554 * Clear CAM memory
1555 */
1556 xge_os_pio_mem_write64(hldev->pdev,
1557 hldev->regh0, 0ULL,
1558 &bar0->rts_pn_cam_data);
1559
1560 val64 = BIT(7) | BIT(15);
1561 } else {
1562 /*
1563 * Assign new Port values according
1564 * to configuration
1565 */
1566 val64 = vBIT(port->num,8,16) |
1567 vBIT(rnum,37,3) | BIT(63);
1568 if (port->src)
1569 val64 = BIT(47);
1570 if (!port->udp)
1571 val64 = BIT(7);
1572 xge_os_pio_mem_write64(hldev->pdev,
1573 hldev->regh0, val64,
1574 &bar0->rts_pn_cam_data);
1575
1576 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8);
1577 }
1578
1579 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1580 val64, &bar0->rts_pn_cam_ctrl);
1581
1582 /* poll until done */
1583 if (__hal_device_register_poll(hldev,
1584 &bar0->rts_pn_cam_ctrl, 0,
1585 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED,
1586 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) !=
1587 XGE_HAL_OK) {
1588 /* upper layer may require to repeat */
1589 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1590 }
1591 }
1592 }
1593 return XGE_HAL_OK;
1594 }
1595
1596 /*
1597 * __hal_device_rts_qos_configure - Configure RTS steering based on
1598 * qos.
1599 * @hldev: HAL device handle.
1600 *
1601 */
1602 xge_hal_status_e
1603 __hal_device_rts_qos_configure(xge_hal_device_t *hldev)
1604 {
1605 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1606 u64 val64;
1607 int j, rx_ring_num;
1608
1609 if (!hldev->config.rts_qos_en) {
1610 return XGE_HAL_OK;
1611 }
1612
1613 /* First clear the RTS_DS_MEM_DATA */
1614 val64 = 0;
1615 for (j = 0; j < 64; j++ )
1616 {
1617 /* First clear the value */
1618 val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
1619
1620 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1621 &bar0->rts_ds_mem_data);
1622
1623 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
1624 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
1625 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j );
1626
1627 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1628 &bar0->rts_ds_mem_ctrl);
1629
1630
1631 /* poll until done */
1632 if (__hal_device_register_poll(hldev,
1633 &bar0->rts_ds_mem_ctrl, 0,
1634 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
1635 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1636 /* upper layer may require to repeat */
1637 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1638 }
1639
1640 }
1641
1642 rx_ring_num = 0;
1643 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) {
1644 if (hldev->config.ring.queue[j].configured)
1645 rx_ring_num++;
1646 }
1647
1648 switch (rx_ring_num) {
1649 case 1:
1650 val64 = 0x0;
1651 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1654 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1655 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1656 break;
1657 case 2:
1658 val64 = 0x0001000100010001ULL;
1659 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1660 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1661 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1662 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1663 val64 = 0x0001000100000000ULL;
1664 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1665 break;
1666 case 3:
1667 val64 = 0x0001020001020001ULL;
1668 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1669 val64 = 0x0200010200010200ULL;
1670 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1671 val64 = 0x0102000102000102ULL;
1672 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1673 val64 = 0x0001020001020001ULL;
1674 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1675 val64 = 0x0200010200000000ULL;
1676 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1677 break;
1678 case 4:
1679 val64 = 0x0001020300010203ULL;
1680 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1681 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1682 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1683 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1684 val64 = 0x0001020300000000ULL;
1685 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1686 break;
1687 case 5:
1688 val64 = 0x0001020304000102ULL;
1689 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1690 val64 = 0x0304000102030400ULL;
1691 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1692 val64 = 0x0102030400010203ULL;
1693 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1694 val64 = 0x0400010203040001ULL;
1695 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1696 val64 = 0x0203040000000000ULL;
1697 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1698 break;
1699 case 6:
1700 val64 = 0x0001020304050001ULL;
1701 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1702 val64 = 0x0203040500010203ULL;
1703 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1704 val64 = 0x0405000102030405ULL;
1705 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1706 val64 = 0x0001020304050001ULL;
1707 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1708 val64 = 0x0203040500000000ULL;
1709 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1710 break;
1711 case 7:
1712 val64 = 0x0001020304050600ULL;
1713 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1714 val64 = 0x0102030405060001ULL;
1715 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1716 val64 = 0x0203040506000102ULL;
1717 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1718 val64 = 0x0304050600010203ULL;
1719 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1720 val64 = 0x0405060000000000ULL;
1721 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1722 break;
1723 case 8:
1724 val64 = 0x0001020304050607ULL;
1725 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1726 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1727 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1728 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1729 val64 = 0x0001020300000000ULL;
1730 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1731 break;
1732 }
1733
1734 return XGE_HAL_OK;
1735 }
1736
1737 /*
1738 * xge_hal_device_rts_mac_enable
1739 *
1740 * @devh: HAL device handle.
1741 * @index: index number at which the MAC address will be stored
1742 * @macaddr: MAC address
1743 *
1744 * - Enable RTS steering for the given MAC address. This function has to be
1745 * called with the lock acquired.
1746 *
1747 * NOTE:
1748 * 1. The ULD has to call this function with an index value that
1749 * satisfies the following condition:
1750 *	ring_num = (index % 8)
1751 * 2. The ULD also needs to make sure that the index is not already
1752 * occupied by a MAC address; if it is, that MAC address will be
1753 * overwritten and HAL will not check for it.
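 *    For example, to steer frames to ring 3 the ULD would pick an index
 *    whose value modulo 8 equals 3 (3, 11, 19, ...).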
1754 *
1755 */
1756 xge_hal_status_e
1757 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
1758 {
1759 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1760 xge_hal_status_e status;
1761
1762 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1763
1764 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1765 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1766
1767 if ( index >= max_addr )
1768 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1769
1770 /*
1771 * Set the MAC address at the given location marked by index.
1772 */
1773 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1774 if (status != XGE_HAL_OK) {
1775 xge_debug_device(XGE_ERR, "%s",
1776 "Not able to set the mac addr");
1777 return status;
1778 }
1779
1780 return xge_hal_device_rts_section_enable(hldev, index);
1781 }
1782
1783 /*
1784 * xge_hal_device_rts_mac_disable
1785 * @devh: HAL device handle.
1786 * @index: index number of the MAC address to disable
1787 *
1788 * Disable RTS steering for the MAC address at the given index.
1789 * This function should be called with the lock acquired.
1790 *
1791 */
1792 xge_hal_status_e
1793 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
1794 {
1795 xge_hal_status_e status;
1796 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1797 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1798
1799 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1800
1801 xge_debug_ll(XGE_TRACE, "the index value is %d ", index);
1802
1803 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1804 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1805
1806 if ( index >= max_addr )
1807 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1808
1809 /*
1810 * Disable MAC address @ given index location
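 * (the entry is invalidated by overwriting it with the all-ones address)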
1811 */
1812 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1813 if (status != XGE_HAL_OK) {
1814 xge_debug_device(XGE_ERR, "%s",
1815 "Not able to set the mac addr");
1816 return status;
1817 }
1818
1819 return XGE_HAL_OK;
1820 }
1821
1822
1823 /*
1824 * __hal_device_rth_it_configure - Configure RTH for the device
1825 * @hldev: HAL device handle.
1826 *
1827 * Configure Receive Traffic Hashing (RTH) using the Indirection Table (IT).
1828 */
1829 xge_hal_status_e
1830 __hal_device_rth_it_configure(xge_hal_device_t *hldev)
1831 {
1832 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1833 u64 val64;
1834 int rings[XGE_HAL_MAX_RING_NUM]={0};
1835 int rnum;
1836 int rmax;
1837 int buckets_num;
1838 int bucket;
1839
1840 if (!hldev->config.rth_en) {
1841 return XGE_HAL_OK;
1842 }
1843
1844 /*
1845 * Set the receive traffic steering mode from default(classic)
1846 * to enhanced.
1847 */
1848 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1849 &bar0->rts_ctrl);
1850 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1851 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1852 val64, &bar0->rts_ctrl);
1853
1854 buckets_num = (1 << hldev->config.rth_bucket_size);
1855
1856 rmax=0;
1857 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1858 if (hldev->config.ring.queue[rnum].configured &&
1859 hldev->config.ring.queue[rnum].rth_en)
1860 rings[rmax++] = rnum;
1861 }
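 /* rings[0..rmax-1] now holds the ring numbers that are configured and have RTH enabled */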
1862
1863 rnum = 0;
1864 /* for starters: fill in all the buckets with rings "equally" */
1865 for (bucket = 0; bucket < buckets_num; bucket++) {
1866
1867 if (rnum == rmax)
1868 rnum = 0;
1869
1870 /* write data */
1871 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
1872 XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
1873 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1874 &bar0->rts_rth_map_mem_data);
1875
1876 /* execute */
1877 val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
1878 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
1879 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
1880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1881 &bar0->rts_rth_map_mem_ctrl);
1882
1883 /* poll until done */
1884 if (__hal_device_register_poll(hldev,
1885 &bar0->rts_rth_map_mem_ctrl, 0,
1886 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
1887 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1888 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1889 }
1890
1891 rnum++;
1892 }
1893
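 /* Enable RTH with the configured bucket size and enable hashing for all
  * supported IPv4/IPv6 TCP and UDP frame types */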
1894 val64 = XGE_HAL_RTS_RTH_EN;
1895 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
1896 val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
1897 XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
1898 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;
1899
1900 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1901 &bar0->rts_rth_cfg);
1902
1903 xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
1904 hldev->config.rth_bucket_size);
1905
1906 return XGE_HAL_OK;
1907 }
1908
1909
1910 /*
1911 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
1912 *
1913 * This function adds a new entry to the SPDM (Socket-Pair Direct Match)
1914 * table and mirrors it in the local copy kept in host memory.
1915 *
1916 * Note:
1917 * This function should be called with the spdm_lock held.
1918 *
1919 * See also: xge_hal_spdm_entry_add, xge_hal_spdm_entry_remove.
1921 */
1922 static xge_hal_status_e
1923 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
1924 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
1925 u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
1926 {
1927 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1928 u64 val64;
1929 u64 spdm_line_arr[8];
1930 u8 line_no;
1931
1932 /*
1933 * Clear the SPDM READY bit
1934 */
1935 val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
1936 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1937 &bar0->rxpic_int_reg);
1938
1939 xge_debug_device(XGE_TRACE,
1940 "L4 SP %x:DP %x: hash %x tgt_queue %d ",
1941 l4_sp, l4_dp, jhash_value, tgt_queue);
1942
1943 xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));
1944
1945 /*
1946 * Construct the SPDM entry.
1947 */
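 /* Line 0 packs the L4 source/destination ports, the target queue number
  * and the TCP/IPv4 flags */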
1948 spdm_line_arr[0] = vBIT(l4_sp,0,16) |
1949 vBIT(l4_dp,16,32) |
1950 vBIT(tgt_queue,53,3) |
1951 vBIT(is_tcp,59,1) |
1952 vBIT(is_ipv4,63,1);
1953
1954
1955 if (is_ipv4) {
1956 spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
1957 vBIT(dst_ip->ipv4.addr,32,32);
1958
1959 } else {
1960 xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
1961 xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
1962 xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
1963 xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
1964 }
1965
1966 spdm_line_arr[7] = vBIT(jhash_value,0,32) |
1967 BIT(63); /* entry enable bit */
1968
1969 /*
1970 * Add the entry to the SPDM table
1971 */
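 /* Each SPDM entry is 64 bytes wide (8 lines of 8 bytes each), hence the
  * (spdm_entry * 64) base offset below */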
1972 for(line_no = 0; line_no < 8; line_no++) {
1973 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1974 spdm_line_arr[line_no],
1975 (void *)((char *)hldev->spdm_mem_base +
1976 (spdm_entry * 64) +
1977 (line_no * 8)));
1978 }
1979
1980 /*
1981 * Wait for the operation to be completed.
1982 */
1983 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
1984 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
1985 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1986 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1987 }
1988
1989 /*
1990 * Add this information to a local SPDM table. The purpose of
1991 * maintaining a local SPDM table is to avoid a search in the
1992 * adapter SPDM table for spdm entry lookup which is very costly
1993 * in terms of time.
1994 */
1995 hldev->spdm_table[spdm_entry]->in_use = 1;
1996 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
1997 sizeof(xge_hal_ipaddr_t));
1998 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
1999 sizeof(xge_hal_ipaddr_t));
2000 hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
2001 hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
2002 hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
2003 hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
2004 hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
2005 hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
2006 hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;
2007
2008 return XGE_HAL_OK;
2009 }
2010
2011 /*
2012 * __hal_device_rth_spdm_configure - Configure RTH for the device
2013 * @hldev: HAL device handle.
2014 *
2015 * Using SPDM (Socket-Pair Direct Match).
2016 */
2017 xge_hal_status_e
2018 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
2019 {
2020 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
2021 u64 val64;
2022 u8 spdm_bar_num;
2023 u32 spdm_bar_offset;
2024 int spdm_table_size;
2025 int i;
2026
2027 if (!hldev->config.rth_spdm_en) {
2028 return XGE_HAL_OK;
2029 }
2030
2031 /*
2032 * Retrieve the base address of SPDM Table.
2033 */
2034 val64 = xge_os_pio_mem_read64(hldev->pdev,
2035 hldev->regh0, &bar0->spdm_bir_offset);
2036
2037 spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
2038 spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);
2039
2040
2041 /*
2042 * spdm_bar_num specifies the PCI BAR used to address the SPDM
2043 * memory space. spdm_bar_offset specifies the offset of the SPDM
2044 * memory within that BAR.
2045 */
2046 switch (spdm_bar_num) {
2047 case 0:
2048 {
2049 hldev->spdm_mem_base = (char *)bar0 +
2050 (spdm_bar_offset * 8);
2051 break;
2052 }
2053 case 1:
2054 {
2055 char *bar1 = (char *)hldev->bar1;
2056 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
2057 break;
2058 }
2059 default:
2060 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
2061 }
2062
2063 /*
2064 * Retrieve the size of the SPDM table (number of entries).
2065 */
2066 val64 = xge_os_pio_mem_read64(hldev->pdev,
2067 hldev->regh0, &bar0->spdm_structure);
2068 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);
2069
2070
2071 spdm_table_size = hldev->spdm_max_entries *
2072 sizeof(xge_hal_spdm_entry_t);
2073 if (hldev->spdm_table == NULL) {
2074 void *mem;
2075
2076 /*
2077 * Allocate memory to hold the copy of SPDM table.
2078 */
2079 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
2080 xge_os_malloc(
2081 hldev->pdev,
2082 (sizeof(xge_hal_spdm_entry_t *) *
2083 hldev->spdm_max_entries))) == NULL) {
2084 return XGE_HAL_ERR_OUT_OF_MEMORY;
2085 }
2086
2087 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
2088 {
2089 xge_os_free(hldev->pdev, hldev->spdm_table,
2090 (sizeof(xge_hal_spdm_entry_t *) *
2091 hldev->spdm_max_entries));
2092 return XGE_HAL_ERR_OUT_OF_MEMORY;
2093 }
2094
2095 xge_os_memzero(mem, spdm_table_size);
2096 for (i = 0; i < hldev->spdm_max_entries; i++) {
2097 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
2098 ((char *)mem +
2099 i * sizeof(xge_hal_spdm_entry_t));
2100 }
2101 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
2102 } else {
2103 /*
2104 * We are here because the host driver tries to
2105 * do a soft reset on the device.
2106 * Since the device soft reset clears the SPDM table, copy
2107 * the entries from the local SPDM table to the actual one.
2108 */
2109 xge_os_spin_lock(&hldev->spdm_lock);
2110 for (i = 0; i < hldev->spdm_max_entries; i++) {
2111 xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];
2112
2113 if (spdm_entry->in_use) {
2114 if (__hal_spdm_entry_add(hldev,
2115 &spdm_entry->src_ip,
2116 &spdm_entry->dst_ip,
2117 spdm_entry->l4_sp,
2118 spdm_entry->l4_dp,
2119 spdm_entry->is_tcp,
2120 spdm_entry->is_ipv4,
2121 spdm_entry->tgt_queue,
2122 spdm_entry->jhash_value,
2123 spdm_entry->spdm_entry)
2124 != XGE_HAL_OK) {
2125 /* Log a warning */
2126 xge_debug_device(XGE_ERR,
2127 "SPDM table update from local"
2128 " memory failed");
2129 }
2130 }
2131 }
2132 xge_os_spin_unlock(&hldev->spdm_lock);
2133 }
2134
2135 /*
2136 * Set the receive traffic steering mode from default(classic)
2137 * to enhanced.
2138 */
2139 val64 = xge_os_pio_mem_read64(hldev->pdev,
2140 hldev->regh0, &bar0->rts_ctrl);
2141 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
2142 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2143 val64, &bar0->rts_ctrl);
2144
2145 /*
2146 * We may not need to configure rts_rth_jhash_cfg register as the
2147 * default values are good enough to calculate the hash.
2148 */
2149
2150 /*
2151 * As of now, set all the rth mask registers to zero. TODO.
2152 */
2153 for(i = 0; i < 5; i++) {
2154 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2155 0, &bar0->rts_rth_hash_mask[i]);
2156 }
2157
2158 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2159 0, &bar0->rts_rth_hash_mask_5);
2160
2161 if (hldev->config.rth_spdm_use_l4) {
2162 val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
2163 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2164 val64, &bar0->rts_rth_status);
2165 }
2166
2167 val64 = XGE_HAL_RTS_RTH_EN;
2168 val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
2169 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2170 &bar0->rts_rth_cfg);
2171
2172
2173 return XGE_HAL_OK;
2174 }
2175
2176 /*
2177 * __hal_device_pci_init
2178 * @hldev: HAL device handle.
2179 *
2180 * Initialize certain PCI/PCI-X configuration registers
2181 * with recommended values. Save config space for future hw resets.
2182 */
2183 static void
2184 __hal_device_pci_init(xge_hal_device_t *hldev)
2185 {
2186 int i, pcisize = 0;
2187 u16 cmd = 0;
2188 u8 val;
2189
2190 /* Store the PCI device ID and revision for future reference; among
2191 * other things they are used to determine the Xena card revision */
2192 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
2193 xge_offsetof(xge_hal_pci_config_le_t, device_id),
2194 &hldev->device_id);
2195 xge_os_pci_read8(hldev->pdev,hldev->cfgh,
2196 xge_offsetof(xge_hal_pci_config_le_t, revision),
2197 &hldev->revision);
2198
2199 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
2200 pcisize = XGE_HAL_PCISIZE_HERC;
2201 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
2202 pcisize = XGE_HAL_PCISIZE_XENA;
2203
2204 /* save original PCI config space to restore it on device_terminate() */
2205 for (i = 0; i < pcisize; i++) {
2206 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2207 (u32*)&hldev->pci_config_space_bios + i);
2208 }
2209
2210 /* Set the Parity Error Response and SERR# Enable bits in the PCI command register. */
2211 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2212 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
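 /* 0x40 = Parity Error Response, 0x100 = SERR# Enable */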
2213 cmd |= 0x140;
2214 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2215 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
2216
2217 /* Set the user-specified value for the PCI Latency Timer */
2218 if (hldev->config.latency_timer &&
2219 hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
2220 xge_os_pci_write8(hldev->pdev, hldev->cfgh,
2221 xge_offsetof(xge_hal_pci_config_le_t,
2222 latency_timer),
2223 (u8)hldev->config.latency_timer);
2224 }
2225 /* Read back latency timer to reflect it into user level */
2226 xge_os_pci_read8(hldev->pdev, hldev->cfgh,
2227 xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
2228 hldev->config.latency_timer = val;
2229
2230 /* Enable Data Parity Error Recovery in PCI-X command register. */
2231 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2232 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2233 cmd |= 1;
2234 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2235 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2236
2237 /* Set MMRB count in PCI-X command register. */
2238 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
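 /* The Maximum Memory Read Byte Count field occupies bits [3:2] of the
  * PCI-X command register, hence the 0xFFF3 mask and the shift by 2 */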
2239 cmd &= 0xFFF3;
2240 cmd |= hldev->config.mmrb_count << 2;
2241 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2242 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2243 cmd);
2244 }
2245 /* Read back MMRB count to reflect it into user level */
2246 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2247 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2248 &cmd);
2249 cmd &= 0x000C;
2250 hldev->config.mmrb_count = cmd>>2;
2251
2252 /* Setting Maximum outstanding splits based on system type. */
2253 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
2254 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2255 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2256 &cmd);
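 /* Maximum Outstanding Split Transactions occupy bits [6:4] of the
  * PCI-X command register, hence the 0xFF8F mask and the shift by 4 */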
2257 cmd &= 0xFF8F;
2258 cmd |= hldev->config.max_splits_trans << 4;
2259 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2260 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2261 cmd);
2262 }
2263
2264 /* Read back max split trans to reflect it into user level */
2265 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2266 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2267 cmd &= 0x0070;
2268 hldev->config.max_splits_trans = cmd>>4;
2269
2270 /* Forcibly disabling relaxed ordering capability of the card. */
2271 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2272 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
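 /* Bit 1 of the PCI-X command register is Enable Relaxed Ordering; clear it */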
2273 cmd &= 0xFFFD;
2274 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2275 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2276
2277 /* save PCI config space for future resets */
2278 for (i = 0; i < pcisize; i++) {
2279 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2280 (u32*)&hldev->pci_config_space + i);
2281 }
2282 }
2283
2284 /*
2285 * __hal_device_pci_info_get - Get PCI bus information such as width, frequency
2286 * and mode.
2287 * @devh: HAL device handle.
2288 * @pci_mode: pointer to a variable of enumerated type
2289 * xge_hal_pci_mode_e{}.
2290 * @bus_frequency: pointer to a variable of enumerated type
2291 * xge_hal_pci_bus_frequency_e{}.
2292 * @bus_width: pointer to a variable of enumerated type
2293 * xge_hal_pci_bus_width_e{}.
2294 *
2295 * Get pci mode, frequency, and PCI bus width.
2296 *
2297 * Returns: one of the xge_hal_status_e{} enumerated types.
2298 * XGE_HAL_OK - for success.
2299 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
2300 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
2301 *
2302 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e, xge_hal_pci_bus_width_e.
2303 */
2304 static xge_hal_status_e
2305 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
2306 xge_hal_pci_bus_frequency_e *bus_frequency,
2307 xge_hal_pci_bus_width_e *bus_width)
2308 {
2309 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2310 xge_hal_status_e rc_status = XGE_HAL_OK;
2311 xge_hal_card_e card_id = xge_hal_device_check_id (devh);
2312
2313 #ifdef XGE_HAL_HERC_EMULATION
2314 hldev->config.pci_freq_mherz =
2315 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2316 *bus_frequency =
2317 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2318 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2319 #else
2320 if (card_id == XGE_HAL_CARD_HERC) {
2321 xge_hal_pci_bar0_t *bar0 =
2322 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2323 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2324 &bar0->pci_info);
2325 if (XGE_HAL_PCI_32_BIT & pci_info)
2326 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
2327 else
2328 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
2329 switch((pci_info & XGE_HAL_PCI_INFO)>>60)
2330 {
2331 case XGE_HAL_PCI_33MHZ_MODE:
2332 *bus_frequency =
2333 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
2334 *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
2335 break;
2336 case XGE_HAL_PCI_66MHZ_MODE:
2337 *bus_frequency =
2338 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2339 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2340 break;
2341 case XGE_HAL_PCIX_M1_66MHZ_MODE:
2342 *bus_frequency =
2343 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2344 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
2345 break;
2346 case XGE_HAL_PCIX_M1_100MHZ_MODE:
2347 *bus_frequency =
2348 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
2349 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
2350 break;
2351 case XGE_HAL_PCIX_M1_133MHZ_MODE:
2352 *bus_frequency =
2353 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2354 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
2355 break;
2356 case XGE_HAL_PCIX_M2_66MHZ_MODE:
2357 *bus_frequency =
2358 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2359 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
2360 break;
2361 case XGE_HAL_PCIX_M2_100MHZ_MODE:
2362 *bus_frequency =
2363 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
2364 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
2365 break;
2366 case XGE_HAL_PCIX_M2_133MHZ_MODE:
2367 *bus_frequency =
2368 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
2369 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
2370 break;
2371 case XGE_HAL_PCIX_M1_RESERVED:
2372 case XGE_HAL_PCIX_M1_66MHZ_NS:
2373 case XGE_HAL_PCIX_M1_100MHZ_NS:
2374 case XGE_HAL_PCIX_M1_133MHZ_NS:
2375 case XGE_HAL_PCIX_M2_RESERVED:
2376 case XGE_HAL_PCIX_533_RESERVED:
2377 default:
2378 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
2379 xge_debug_device(XGE_ERR,
2380 "invalid pci info "XGE_OS_LLXFMT,
2381 (unsigned long long)pci_info);
2382 break;
2383 }
2384 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
2385 xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
2386 "%d frequency %d", *pci_mode, *bus_width,
2387 *bus_frequency);
2388 if (hldev->config.pci_freq_mherz ==
2389 XGE_HAL_DEFAULT_USE_HARDCODE) {
2390 hldev->config.pci_freq_mherz = *bus_frequency;
2391 }
2392 }
2393 /* For XENA we report the PCI mode only; PCI bus frequency and bus width
2394 * are set to unknown */
2395 else if (card_id == XGE_HAL_CARD_XENA) {
2396 u32 pcix_status;
2397 u8 dev_num, bus_num;
2398 /* initialize defaults for XENA */
2399 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2400 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2401 xge_os_pci_read32(hldev->pdev, hldev->cfgh,
2402 xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
2403 &pcix_status);
2404 dev_num = (u8)((pcix_status & 0xF8) >> 3);
2405 bus_num = (u8)((pcix_status & 0xFF00) >> 8);
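 /* In PCI-X mode the bridge assigns non-zero device/bus numbers in the
  * PCI-X status register; both reading zero is taken to indicate a
  * conventional PCI bus */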
2406 if (dev_num == 0 && bus_num == 0)
2407 *pci_mode = XGE_HAL_PCI_BASIC_MODE;
2408 else
2409 *pci_mode = XGE_HAL_PCIX_BASIC_MODE;
2410 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
2411 if (hldev->config.pci_freq_mherz ==
2412 XGE_HAL_DEFAULT_USE_HARDCODE) {
2413 /*
2414 * There is no way to detect the bus frequency on Xena, so in
2415 * the automatic configuration case we simply assume 133MHz.
2417 */
2418 hldev->config.pci_freq_mherz =
2419 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2420 }
2421 } else if (card_id == XGE_HAL_CARD_TITAN) {
2422 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
2423 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
2424 if (hldev->config.pci_freq_mherz ==
2425 XGE_HAL_DEFAULT_USE_HARDCODE) {
2426 hldev->config.pci_freq_mherz = *bus_frequency;
2427 }
2428 } else{
2429 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID;
2430 xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
2431 }
2432 #endif
2433
2434 return rc_status;
2435 }
2436
2437 /*
2438 * __hal_device_handle_link_up_ind
2439 * @hldev: HAL device handle.
2440 *
2441 * Link up indication handler. The function is invoked by HAL when
2442 * Xframe indicates that the link is up for programmable amount of time.
2443 */
2444 static int
2445 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
2446 {
2447 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2448 u64 val64;
2449
2450 /*
2451 * If the previous link state is not down, return.
2452 */
2453 if (hldev->link_state == XGE_HAL_LINK_UP) {
2454 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2455 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2456 val64 = xge_os_pio_mem_read64(
2457 hldev->pdev, hldev->regh0,
2458 &bar0->misc_int_mask);
2459 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2460 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2461 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2462 val64, &bar0->misc_int_mask);
2463 }
2464 #endif
2465 xge_debug_device(XGE_TRACE,
2466 "link up indication while link is up, ignoring..");
2467 return 0;
2468 }
2469
2470 /* Re-enable the adapter; link noise may have caused the hardware to turn it off */
2471 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2472 &bar0->adapter_control);
2473 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
2474 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* clear ECC enable */
2475 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2476 &bar0->adapter_control);
2477
2478 /* Turn on the Laser */
2479 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2480 &bar0->adapter_control);
2481 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
2482 XGE_HAL_ADAPTER_LED_ON);
2483 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2484 &bar0->adapter_control);
2485
2486 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2487 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2488 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2489 &bar0->adapter_status);
2490 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2491 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
2492 xge_debug_device(XGE_TRACE, "%s",
2493 "fail to transition link to up...");
2494 return 0;
2495 }
2496 else {
2497 /*
2498 * Mask the Link Up interrupt and unmask the Link Down
2499 * interrupt.
2500 */
2501 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2502 &bar0->misc_int_mask);
2503 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2504 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2505 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2506 &bar0->misc_int_mask);
2507 xge_debug_device(XGE_TRACE, "calling link up..");
2508 hldev->link_state = XGE_HAL_LINK_UP;
2509
2510 /* notify ULD */
2511 if (g_xge_hal_driver->uld_callbacks.link_up) {
2512 g_xge_hal_driver->uld_callbacks.link_up(
2513 hldev->upper_layer_info);
2514 }
2515 return 1;
2516 }
2517 }
2518 #endif
2519 xge_os_mdelay(1);
2520 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2521 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2522 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2523 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2524
2525 /* notify ULD */
2526 (void) xge_queue_produce_context(hldev->queueh,
2527 XGE_HAL_EVENT_LINK_IS_UP, hldev);
2528 /* link is up after been enabled */
2529 return 1;
2530 } else {
2531 xge_debug_device(XGE_TRACE, "%s",
2532 "fail to transition link to up...");
2533 return 0;
2534 }
2535 }
2536
2537 /*
2538 * __hal_device_handle_link_down_ind
2539 * @hldev: HAL device handle.
2540 *
2541 * Link down indication handler. The function is invoked by HAL when
2542 * Xframe indicates that the link is down.
2543 */
2544 static int
2545 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
2546 {
2547 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2548 u64 val64;
2549
2550 /*
2551 * If the previous link state is not up, return.
2552 */
2553 if (hldev->link_state == XGE_HAL_LINK_DOWN) {
2554 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2555 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2556 val64 = xge_os_pio_mem_read64(
2557 hldev->pdev, hldev->regh0,
2558 &bar0->misc_int_mask);
2559 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2560 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2561 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2562 val64, &bar0->misc_int_mask);
2563 }
2564 #endif
2565 xge_debug_device(XGE_TRACE,
2566 "link down indication while link is down, ignoring..");
2567 return 0;
2568 }
2569 xge_os_mdelay(1);
2570
2571 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2572 &bar0->adapter_control);
2573
2574 /* try to debounce the link only if the adapter is enabled. */
2575 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
2576 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2577 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2578 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2579 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2580 xge_debug_device(XGE_TRACE,
2581 "link is actually up (possible noisy link?), ignoring.");
2582 return(0);
2583 }
2584 }
2585
2586 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2587 &bar0->adapter_control);
2588 /* turn off LED */
2589 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
2590 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2591 &bar0->adapter_control);
2592
2593 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2594 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2595 /*
2596 * Mask the Link Down interrupt and unmask the Link up
2597 * interrupt
2598 */
2599 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2600 &bar0->misc_int_mask);
2601 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2602 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2603 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2604 &bar0->misc_int_mask);
2605
2606 /* link is down */
2607 xge_debug_device(XGE_TRACE, "calling link down..");
2608 hldev->link_state = XGE_HAL_LINK_DOWN;
2609
2610 /* notify ULD */
2611 if (g_xge_hal_driver->uld_callbacks.link_down) {
2612 g_xge_hal_driver->uld_callbacks.link_down(
2613 hldev->upper_layer_info);
2614 }
2615 return 1;
2616 }
2617 #endif
2618 /* notify ULD */
2619 (void) xge_queue_produce_context(hldev->queueh,
2620 XGE_HAL_EVENT_LINK_IS_DOWN, hldev);
2621 /* link is down */
2622 return 1;
2623 }
2624 /*
2625 * __hal_device_handle_link_state_change
2626 * @hldev: HAL device handle.
2627 *
2628 * Link state change handler. The function is invoked by HAL when
2629 * Xframe indicates link state change condition. The code here makes sure to
2630 * 1) ignore redundant state change indications;
2631 * 2) execute link-up sequence, and handle the failure to bring the link up;
2632 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
2633 * upper-layer driver (ULD).
2634 */
2635 static int
2636 __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
2637 {
2638 u64 hw_status;
2639 int hw_link_state;
2640 int retcode;
2641 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2642 u64 val64;
2643 int i = 0;
2644
2645 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2646 &bar0->adapter_control);
2647
2648 /* If the adapter is not enabled but the hal thinks we are in the up
2649 * state then transition to the down state.
2650 */
2651 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
2652 (hldev->link_state == XGE_HAL_LINK_UP) ) {
2653 return(__hal_device_handle_link_down_ind(hldev));
2654 }
2655
2656 do {
2657 xge_os_mdelay(1);
2658 (void) xge_hal_device_status(hldev, &hw_status);
2659 hw_link_state = (hw_status &
2660 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2661 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
2662 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
2663
2664 /* check if the current link state is still considered
2665 * to be changed. This way we will make sure that this is
2666 * not a noise which needs to be filtered out */
2667 if (hldev->link_state == hw_link_state)
2668 break;
2669 } while (i++ < hldev->config.link_valid_cnt);
2670
2671 /* If the current link state is same as previous, just return */
2672 if (hldev->link_state == hw_link_state)
2673 retcode = 0;
2674 /* detected state change */
2675 else if (hw_link_state == XGE_HAL_LINK_UP)
2676 retcode = __hal_device_handle_link_up_ind(hldev);
2677 else
2678 retcode = __hal_device_handle_link_down_ind(hldev);
2679 return retcode;
2680 }
2681
2682 /*
2683 * __hal_device_handle_serr - Handle a system error (SERR) indication.
2684 */
2685 static void
2686 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
2687 {
2688 hldev->stats.sw_dev_err_stats.serr_cnt++;
2689 if (hldev->config.dump_on_serr) {
2690 #ifdef XGE_HAL_USE_MGMT_AUX
2691 (void) xge_hal_aux_device_dump(hldev);
2692 #endif
2693 }
2694
2695 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
2696 1, sizeof(u64), (void *)&value);
2697
2698 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2699 (unsigned long long) value);
2700 }
2701
2702 /*
2703 * __hal_device_handle_eccerr - Handle an ECC error indication.
2704 */
2705 static void
2706 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
2707 {
2708 if (hldev->config.dump_on_eccerr) {
2709 #ifdef XGE_HAL_USE_MGMT_AUX
2710 (void) xge_hal_aux_device_dump(hldev);
2711 #endif
2712 }
2713
2714 /* Herc is smart enough to recover on its own! */
2715 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
2716 (void) xge_queue_produce(hldev->queueh,
2717 XGE_HAL_EVENT_ECCERR, hldev,
2718 1, sizeof(u64), (void *)&value);
2719 }
2720
2721 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2722 (unsigned long long) value);
2723 }
2724
2725 /*
2726 * __hal_device_handle_parityerr - Handle a data parity error indication.
2727 */
2728 static void
2729 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
2730 {
2731 if (hldev->config.dump_on_parityerr) {
2732 #ifdef XGE_HAL_USE_MGMT_AUX
2733 (void) xge_hal_aux_device_dump(hldev);
2734 #endif
2735 }
2736 (void) xge_queue_produce_context(hldev->queueh,
2737 XGE_HAL_EVENT_PARITYERR, hldev);
2738
2739 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2740 (unsigned long long) value);
2741 }
2742
2743 /*
2744 * __hal_device_handle_targetabort - Handle a PCI target abort indication.
2745 */
2746 static void
2747 __hal_device_handle_targetabort(xge_hal_device_t *hldev)
2748 {
2749 (void) xge_queue_produce_context(hldev->queueh,
2750 XGE_HAL_EVENT_TARGETABORT, hldev);
2751 }
2752
2753
2754 /*
2755 * __hal_device_hw_initialize
2756 * @hldev: HAL device handle.
2757 *
2758 * Initialize Xframe hardware.
2759 */
2760 static xge_hal_status_e
2761 __hal_device_hw_initialize(xge_hal_device_t *hldev)
2762 {
2763 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2764 xge_hal_status_e status;
2765 u64 val64;
2766
2767 /* Set proper endian settings and verify the same by reading the PIF
2768 * Feed-back register. */
2769 status = __hal_device_set_swapper(hldev);
2770 if (status != XGE_HAL_OK) {
2771 return status;
2772 }
2773
2774 /* update the pci mode, frequency, and width */
2775 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
2776 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
2777 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
2778 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2779 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2780 /*
2781 * FIXME: this cannot happen.
2782 * But if it happens we cannot continue just like that
2783 */
2784 xge_debug_device(XGE_ERR, "unable to get pci info");
2785 }
2786
2787 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
2788 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
2789 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
2790 /* PCI optimization: set TxReqTimeOut
2791 * register (0x800+0x120) to 0x1ff or
2792 * something close to this.
2793 * Note: not to be used for PCI-X! */
2794
2795 val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
2796 val64 |= XGE_HAL_TXREQTO_EN;
2797 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2798 &bar0->txreqtimeout);
2799
2800 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2801 &bar0->read_retry_delay);
2802
2803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2804 &bar0->write_retry_delay);
2805
2806 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
2807 }
2808
2809 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ ||
2810 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) {
2811
2812 /* Optimizing for PCI-X 266/250 */
2813
2814 val64 = XGE_HAL_TXREQTO_VAL(0x7F);
2815 val64 |= XGE_HAL_TXREQTO_EN;
2816 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2817 &bar0->txreqtimeout);
2818
2819 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes");
2820 }
2821
2822 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2823 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2824 &bar0->read_retry_delay);
2825
2826 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2827 &bar0->write_retry_delay);
2828 }
2829
2830 /* Set the number of bytes used to update the lso_bytes_sent field
2831 returned in TxD0 */
2832 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2833 &bar0->pic_control_2);
2834 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2);
2835 val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
2836 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2837 &bar0->pic_control_2);
2838 /* Clear the EOI_RESET field while leaving XGXS_RESET asserted,
2839 * then delay for one second */
2840 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2841 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
2842 xge_os_mdelay(1000);
2843
2844 /* Clear the XGXS_RESET field of the SW_RESET register in order to
2845 * release the XGXS from reset. Its reset value is 0xA5; write 0x00
2846 * to activate the XGXS. The core requires a minimum 500 us reset.*/
2847 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
2848 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2849 &bar0->sw_reset);
2850 xge_os_mdelay(1);
2851
2852 /* read registers in all blocks */
2853 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2854 &bar0->mac_int_mask);
2855 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2856 &bar0->mc_int_mask);
2857 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2858 &bar0->xgxs_int_mask);
2859
2860 /* set default MTU and steer based on length */
2861 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Always set 22 bytes extra for steering to work
2862
2863 if (hldev->config.mac.rmac_bcast_en) {
2864 xge_hal_device_bcast_enable(hldev);
2865 } else {
2866 xge_hal_device_bcast_disable(hldev);
2867 }
2868
2869 #ifndef XGE_HAL_HERC_EMULATION
2870 __hal_device_xaui_configure(hldev);
2871 #endif
2872 __hal_device_mac_link_util_set(hldev);
2873
2874 __hal_device_mac_link_util_set(hldev);
2875
2876 /*
2877 * Keep its PCI REQ# line asserted during a write
2878 * transaction up to the end of the transaction
2879 */
2880 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2881 &bar0->misc_control);
2882
2883 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;
2884
2885 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2886 val64, &bar0->misc_control);
2887
2888 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2889 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2890 &bar0->misc_control);
2891
2892 val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT;
2893
2894 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2895 val64, &bar0->misc_control);
2896 }
2897
2898 /*
2899 * In bimodal interrupt mode all Rx traffic interrupts go to TTI,
2900 * so we need to adjust the RTI settings and use an adaptive TTI
2901 * timer. We also need to make sure RTI is configured to a sane
2902 * value that will not disrupt the bimodal behavior.
2904 */
2905 if (hldev->config.bimodal_interrupts) {
2906 int i;
2907
2908 /* force polling_cnt to be "0", otherwise
2909 * IRQ workload statistics will be skewed. This could
2910 * be worked out in the TXPIC handler later. */
2911 hldev->config.isr_polling_cnt = 0;
2912 hldev->config.sched_timer_us = 10000;
2913
2914 /* disable all TTI < 56 */
2915 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
2916 int j;
2917 if (!hldev->config.fifo.queue[i].configured)
2918 continue;
2919 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
2920 if (hldev->config.fifo.queue[i].tti[j].enabled)
2921 hldev->config.fifo.queue[i].tti[j].enabled = 0;
2922 }
2923 }
2924
2925 /* now configure bimodal interrupts */
2926 __hal_device_bimodal_configure(hldev);
2927 }
2928
2929 status = __hal_device_tti_configure(hldev, 0);
2930 if (status != XGE_HAL_OK)
2931 return status;
2932
2933 status = __hal_device_rti_configure(hldev, 0);
2934 if (status != XGE_HAL_OK)
2935 return status;
2936
2937 status = __hal_device_rth_it_configure(hldev);
2938 if (status != XGE_HAL_OK)
2939 return status;
2940
2941 status = __hal_device_rth_spdm_configure(hldev);
2942 if (status != XGE_HAL_OK)
2943 return status;
2944
2945 status = __hal_device_rts_mac_configure(hldev);
2946 if (status != XGE_HAL_OK) {
2947 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed ");
2948 return status;
2949 }
2950
2951 status = __hal_device_rts_port_configure(hldev);
2952 if (status != XGE_HAL_OK) {
2953 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed ");
2954 return status;
2955 }
2956
2957 status = __hal_device_rts_qos_configure(hldev);
2958 if (status != XGE_HAL_OK) {
2959 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed ");
2960 return status;
2961 }
2962
2963 __hal_device_pause_frames_configure(hldev);
2964 __hal_device_rmac_padding_configure(hldev);
2965 __hal_device_shared_splits_configure(hldev);
2966
2967 /* make sure all interrupts going to be disabled at the moment */
2968 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
2969
2970 /* SXE-008 Transmit DMA arbitration issue */
2971 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
2972 hldev->revision < 4) {
2973 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
2974 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
2975 &bar0->pcc_enable);
2976 }
2977 __hal_fifo_hw_initialize(hldev);
2978 __hal_ring_hw_initialize(hldev);
2979
2980 if (__hal_device_wait_quiescent(hldev, &val64)) {
2981 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2982 }
2983
2984 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
2985 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
2986 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
2987 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
2988 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2989 }
2990
2991 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
2992 (unsigned long long)(ulong_t)hldev);
2993
2994 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX ||
2995 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) {
2996 /*
2997 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
2998 * is disabled.
2999 */
3000 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3001 &bar0->pic_control);
3002 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
3003 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3004 &bar0->pic_control);
3005 }
3006
3007 hldev->hw_is_initialized = 1;
3008 hldev->terminating = 0;
3009 return XGE_HAL_OK;
3010 }
3011
3012 /*
3013 * __hal_device_reset - Reset device only.
3014 * @hldev: HAL device handle.
3015 *
3016 * Reset the device, and subsequently restore
3017 * the previously saved PCI configuration space.
3018 */
3019 #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
3020 static xge_hal_status_e
3021 __hal_device_reset(xge_hal_device_t *hldev)
3022 {
3023 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3024 int i, j, swap_done, pcisize = 0;
3025 u64 val64, rawval = 0ULL;
3026
3027 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3028 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3029 if ( hldev->bar2 ) {
3030 u64 *msix_vector_table = (u64 *)hldev->bar2;
3031
3032 /* 2 64bit words for each entry */
3033 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3034 i++) {
3035 hldev->msix_vector_table[i] =
3036 xge_os_pio_mem_read64(hldev->pdev,
3037 hldev->regh2, &msix_vector_table[i]);
3038 }
3039 }
3040 }
3041 }
3042 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3043 &bar0->pif_rd_swapper_fb);
3044 swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);
3045
3046 if (swap_done) {
3047 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
3048 (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
3049 } else {
3050 u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
3051 #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
3052 /* swap it */
3053 val = (((val & (u32)0x000000ffUL) << 24) |
3054 ((val & (u32)0x0000ff00UL) << 8) |
3055 ((val & (u32)0x00ff0000UL) >> 8) |
3056 ((val & (u32)0xff000000UL) >> 24));
3057 #endif
3058 xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
3059 &bar0->sw_reset);
3060 }
3061
3062 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
3063 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
3064
3065 xge_os_mdelay(20); /* Wait for 20 ms after reset */
3066
3067 {
3068 /* Poll for no more than 1 second */
3069 for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
3070 {
3071 for (j = 0; j < pcisize; j++) {
3072 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
3073 *((u32*)&hldev->pci_config_space + j));
3074 }
3075
3076 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
3077 xge_offsetof(xge_hal_pci_config_le_t, device_id),
3078 &hldev->device_id);
3079
3080 if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
3081 break;
3082 xge_os_mdelay(20);
3083 }
3084 }
3085
3086 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
3087 {
3088 xge_debug_device(XGE_ERR, "device reset failed");
3089 return XGE_HAL_ERR_RESET_FAILED;
3090 }
3091
3092 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3093 int cnt = 0;
3094
3095 rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
3096 pcisize = XGE_HAL_PCISIZE_HERC;
3097 xge_os_mdelay(1);
3098 do {
3099 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3100 &bar0->sw_reset);
3101 if (val64 != rawval) {
3102 break;
3103 }
3104 cnt++;
3105 xge_os_mdelay(1); /* Wait for 1ms before retry */
3106 } while(cnt < 20);
3107 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
3108 rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
3109 pcisize = XGE_HAL_PCISIZE_XENA;
3110 xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
3111 }
3112
3113 /* Restore MSI-X vector table */
3114 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3115 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3116 if ( hldev->bar2 ) {
3117 /*
3118 * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 )
3119 * 98: PBATable 00000404 ( BIR:4 Offset:0x400 )
3120 */
3121 u64 *msix_vector_table = (u64 *)hldev->bar2;
3122
3123 /* 2 64bit words for each entry */
3124 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3125 i++) {
3126 xge_os_pio_mem_write64(hldev->pdev,
3127 hldev->regh2,
3128 hldev->msix_vector_table[i],
3129 &msix_vector_table[i]);
3130 }
3131 }
3132 }
3133 }
3134
3135 hldev->link_state = XGE_HAL_LINK_DOWN;
3136 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3137 &bar0->sw_reset);
3138
3139 if (val64 != rawval) {
3140 xge_debug_device(XGE_ERR, "device has not been reset "
3141 "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
3142 (unsigned long long)val64, (unsigned long long)rawval);
3143 return XGE_HAL_ERR_RESET_FAILED;
3144 }
3145
3146 hldev->hw_is_initialized = 0;
3147 return XGE_HAL_OK;
3148 }
3149
3150 /*
3151 * __hal_device_poll - General private routine to poll the device.
3152 * @hldev: HAL device handle.
3153 *
3154 * Returns: one of the xge_hal_status_e{} enumerated types.
3155 * XGE_HAL_OK - for success.
3156 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
3157 */
3158 static xge_hal_status_e
3159 __hal_device_poll(xge_hal_device_t *hldev)
3160 {
3161 xge_hal_pci_bar0_t *bar0;
3162 u64 err_reg;
3163
3164 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3165
3166 /* Handling SERR errors by forcing a H/W reset. */
3167 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3168 &bar0->serr_source);
3169 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
3170 __hal_device_handle_serr(hldev, "serr_source", err_reg);
3171 return XGE_HAL_ERR_CRITICAL;
3172 }
3173
3174 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3175 &bar0->misc_int_reg);
3176
3177 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
3178 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
3179 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
3180 return XGE_HAL_ERR_CRITICAL;
3181 }
3182
3183 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3184 if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) ||
3185 (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX))
3186 #endif
3187 {
3188
3189 /* Handling link status change error Intr */
3190 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3191 &bar0->mac_rmac_err_reg);
3192 if (__hal_device_handle_link_state_change(hldev))
3193 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3194 err_reg, &bar0->mac_rmac_err_reg);
3195 }
3196
3197 if (hldev->inject_serr != 0) {
3198 err_reg = hldev->inject_serr;
3199 hldev->inject_serr = 0;
3200 __hal_device_handle_serr(hldev, "inject_serr", err_reg);
3201 return XGE_HAL_ERR_CRITICAL;
3202 }
3203
3204 if (hldev->inject_ecc != 0) {
3205 err_reg = hldev->inject_ecc;
3206 hldev->inject_ecc = 0;
3207 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3208 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
3209 return XGE_HAL_ERR_CRITICAL;
3210 }
3211
3212 if (hldev->inject_bad_tcode != 0) {
3213 u8 t_code = hldev->inject_bad_tcode;
3214 xge_hal_channel_t channel;
3215 xge_hal_fifo_txd_t txd;
3216 xge_hal_ring_rxd_1_t rxd;
3217
3218 channel.devh = hldev;
3219
3220 if (hldev->inject_bad_tcode_for_chan_type ==
3221 XGE_HAL_CHANNEL_TYPE_FIFO) {
3222 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;
3223
3224 } else {
3225 channel.type = XGE_HAL_CHANNEL_TYPE_RING;
3226 }
3227
3228 hldev->inject_bad_tcode = 0;
3229
3230 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
3231 return xge_hal_device_handle_tcode(&channel, &txd,
3232 t_code);
3233 else
3234 return xge_hal_device_handle_tcode(&channel, &rxd,
3235 t_code);
3236 }
3237
3238 return XGE_HAL_OK;
3239 }
3240
3241 /*
3242 * __hal_verify_pcc_idle - Verify that all enabled PCCs are idle
3243 * @hldev: HAL device handle.
3244 * @adp_status: Adapter Status value
3245 * Usage: See xge_hal_device_enable{}.
3246 */
3247 xge_hal_status_e
3248 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
3249 {
3250 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
3251 hldev->revision < 4) {
3252 /*
3253 * For Xena revisions 1, 2 and 3 we enable only 4 PCCs due to
3254 * SXE-008 (the Transmit DMA arbitration issue).
3255 */
3256 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
3257 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
3258 xge_debug_device(XGE_TRACE, "%s",
3259 "PCC is not IDLE after adapter enabled!");
3260 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3261 }
3262 } else {
3263 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
3264 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
3265 xge_debug_device(XGE_TRACE, "%s",
3266 "PCC is not IDLE after adapter enabled!");
3267 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3268 }
3269 }
3270 return XGE_HAL_OK;
3271 }
3272
3273 static void
3274 __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
3275 {
3276 int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
3277 int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
3278 int iwl_cnt, i;
3279
3280 #define _HIST_SIZE 50 /* 0.5 sec history */
3281 #define _HIST_ADJ_TIMER 1
3282 #define _STEP 2
3283
3284 static int bytes_avg_history[_HIST_SIZE] = {0};
3285 static int d_avg_history[_HIST_SIZE] = {0};
3286 static int history_idx = 0;
3287 static int pstep = 1;
3288 static int hist_adj_timer = 0;
3289
3290 /*
3291 * tval - current value of this bimodal timer
3292 */
3293 tval = hldev->bimodal_tti[ring_no].timer_val_us;
3294
3295 /*
3296 * d - how many interrupts we have received since the last
3297 * bimodal timer tick.
3298 */
3299 d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
3300 hldev->bimodal_intr_cnt;
3301
3302 /* advance bimodal interrupt counter */
3303 hldev->bimodal_intr_cnt =
3304 hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
3305
3306 /*
3307 * iwl_cnt - how many interrupts we've got since last
3308 * bimodal timer tick.
3309 */
3310 iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
3311 hldev->irq_workload_rxcnt[ring_no] : 1);
3312 iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
3313 hldev->irq_workload_txcnt[ring_no] : 1);
3314 iwl_cnt = iwl_rxcnt + iwl_txcnt;
3315 iwl_cnt = iwl_cnt; /* just to remove the lint warning */
3316
3317 /*
3318 * we need to take hldev->config.isr_polling_cnt into account
3319 * but for some reason this line causing GCC to produce wrong
3320 * code on Solaris. As of now, if bimodal_interrupts is configured
3321 * hldev->config.isr_polling_cnt is forced to be "0".
3322 *
3323 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
3324
3325 /*
3326 * iwl_avg - how many descriptors on average have been processed
3327 * since the last bimodal timer tick. This indirectly reflects
3328 * CPU utilization.
3329 */
3330 iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
3331 iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
3332 iwl_avg = iwl_rxavg + iwl_txavg;
3333 iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
3334
3335 /*
3336 * len_avg - how many bytes on average have been processed since
3337 * the last bimodal timer tick, i.e. the average frame size.
3338 */
3339 len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
3340 (hldev->irq_workload_rxd[ring_no] ?
3341 hldev->irq_workload_rxd[ring_no] : 1);
3342 len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
3343 (hldev->irq_workload_txd[ring_no] ?
3344 hldev->irq_workload_txd[ring_no] : 1);
3345 len_avg = len_rxavg + len_txavg;
3346 if (len_avg < 60)
3347 len_avg = 60;
3348
3349 /* align on low boundary */
3350 if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
3351 tval = hldev->config.bimodal_timer_lo_us;
3352
3353 /* reset faster */
3354 if (iwl_avg == 1) {
3355 tval = hldev->config.bimodal_timer_lo_us;
3356 /* reset history */
3357 for (i = 0; i < _HIST_SIZE; i++)
3358 bytes_avg_history[i] = d_avg_history[i] = 0;
3359 history_idx = 0;
3360 pstep = 1;
3361 hist_adj_timer = 0;
3362 }
3363
3364 /* always try to adjust the timer to the best throughput value */
3365 bytes_avg = iwl_avg * len_avg;
3366 history_idx %= _HIST_SIZE;
3367 bytes_avg_history[history_idx] = bytes_avg;
3368 d_avg_history[history_idx] = d;
3369 history_idx++;
3370 d_hist = bytes_hist = 0;
3371 for (i = 0; i < _HIST_SIZE; i++) {
3372 /* do not re-configure until history is gathered */
3373 if (!bytes_avg_history[i]) {
3374 tval = hldev->config.bimodal_timer_lo_us;
3375 goto _end;
3376 }
3377 bytes_hist += bytes_avg_history[i];
3378 d_hist += d_avg_history[i];
3379 }
3380 bytes_hist /= _HIST_SIZE;
3381 d_hist /= _HIST_SIZE;
3382
3383 // xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
3384 // d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
3385 // d_hist*bytes_hist, pstep);
3386
3387 /* make an adaptive step */
3388 if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
3389 pstep = !pstep;
3390 hist_adj_timer = 0;
3391 }
3392
3393 if (pstep &&
3394 (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
3395 tval += _STEP;
3396 hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
3397 } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
3398 tval -= _STEP;
3399 hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
3400 }
3401
3402 /* enable TTI range A for better latencies */
3403 hldev->bimodal_urange_a_en = 0;
3404 if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
3405 hldev->bimodal_urange_a_en = 1;
3406
3407 _end:
3408 /* reset workload statistics counters */
3409 hldev->irq_workload_rxcnt[ring_no] = 0;
3410 hldev->irq_workload_rxd[ring_no] = 0;
3411 hldev->irq_workload_rxlen[ring_no] = 0;
3412 hldev->irq_workload_txcnt[ring_no] = 0;
3413 hldev->irq_workload_txd[ring_no] = 0;
3414 hldev->irq_workload_txlen[ring_no] = 0;
3415
3416 /* reconfigure TTI56 + ring_no with new timer value */
3417 hldev->bimodal_timer_val_us = tval;
3418 (void) __hal_device_rti_configure(hldev, 1);
3419 }
3420
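/*
 * __hal_update_rxufca - Adapt the urange_a utilization frame count.
 * @hldev: HAL device handle.
 * @ring_no: Ring whose RTI configuration is sampled.
 *
 * Called on every scheduler-timer tick when adaptive RX interrupt
 * coalescing is enabled. Once per rxufca_lbolt_period ticks it looks at
 * the cumulative RX traffic interrupt count: if the count has crossed
 * the current threshold, ufc_a is bumped by one (up to rxufca_hi_lim)
 * to coalesce harder and the threshold is advanced by rxufca_intr_thres;
 * otherwise ufc_a is dropped by one (down to rxufca_lo_lim). The new
 * value is applied to all rings via __hal_device_rti_configure().
 */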
3421 static void
3422 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
3423 {
3424 int ufc, ic, i;
3425
3426 ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
3427 ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
3428
3429 /* urange_a adaptive coalescing */
3430 if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
3431 if (ic > hldev->rxufca_intr_thres) {
3432 if (ufc < hldev->config.rxufca_hi_lim) {
3433 ufc += 1;
3434 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3435 hldev->config.ring.queue[i].rti.ufc_a = ufc;
3436 (void) __hal_device_rti_configure(hldev, 1);
3437 hldev->stats.sw_dev_info_stats.rxufca_hi_adjust_cnt++;
3438 }
3439 hldev->rxufca_intr_thres = ic +
3440 hldev->config.rxufca_intr_thres; /* def: 30 */
3441 } else {
3442 if (ufc > hldev->config.rxufca_lo_lim) {
3443 ufc -= 1;
3444 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3445 hldev->config.ring.queue[i].rti.ufc_a = ufc;
3446 (void) __hal_device_rti_configure(hldev, 1);
3447 hldev->stats.sw_dev_info_stats.rxufca_lo_adjust_cnt++;
3448 }
3449 }
3450 hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
3451 hldev->config.rxufca_lbolt_period;
3452 }
3453 hldev->rxufca_lbolt++;
3454 }
3455
3456 /*
3457 * __hal_device_handle_mc - Handle MC interrupt reason
3458 * @hldev: HAL device handle.
3459 * @reason: interrupt reason
3460 */
3461 xge_hal_status_e
3462 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
3463 {
3464 xge_hal_pci_bar0_t *isrbar0 =
3465 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3466 u64 val64;
3467
3468 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3469 &isrbar0->mc_int_status);
3470 if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
3471 return XGE_HAL_OK;
3472
3473 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3474 &isrbar0->mc_err_reg);
3475 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3476 val64, &isrbar0->mc_err_reg);
3477
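/*
 * Classify the ECC fault: the *_SG_* bits below flag correctable
 * single-bit errors, the *_DB_* bits flag uncorrectable double-bit
 * errors. The ITQ/RLD bits are only checked on non-Xena adapters.
 */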
3478 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
3479 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
3480 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
3481 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
3482 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3483 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
3484 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
3485 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
3486 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
3487 hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
3488 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3489 }
3490
3491 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
3492 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
3493 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3494 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
3495 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3496 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
3497 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
3498 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
3499 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
3500 hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
3501 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3502 }
3503
3504 if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
3505 hldev->stats.sw_dev_err_stats.sm_err_cnt++;
3506 }
3507
3508 /* these two should result in a device reset */
3509 if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3510 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
3511 __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
3512 return XGE_HAL_ERR_CRITICAL;
3513 }
3514
3515 return XGE_HAL_OK;
3516 }
3517
3518 /*
3519 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
3520 * @hldev: HAL device handle.
3521 * @reason: interrupt reason
3522 */
3523 xge_hal_status_e
3524 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
3525 {
3526 xge_hal_pci_bar0_t *isrbar0 =
3527 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3528 u64 val64;
3529
3530 if (reason & XGE_HAL_PIC_INT_FLSH) {
3531 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3532 &isrbar0->flsh_int_reg);
3533 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3534 val64, &isrbar0->flsh_int_reg);
3535 /* FIXME: handle register */
3536 }
3537 if (reason & XGE_HAL_PIC_INT_MDIO) {
3538 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3539 &isrbar0->mdio_int_reg);
3540 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3541 val64, &isrbar0->mdio_int_reg);
3542 /* FIXME: handle register */
3543 }
3544 if (reason & XGE_HAL_PIC_INT_IIC) {
3545 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3546 &isrbar0->iic_int_reg);
3547 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3548 val64, &isrbar0->iic_int_reg);
3549 /* FIXME: handle register */
3550 }
3551 if (reason & XGE_HAL_PIC_INT_MISC) {
3552 val64 = xge_os_pio_mem_read64(hldev->pdev,
3553 hldev->regh0, &isrbar0->misc_int_reg);
3554 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3555 if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) &&
3556 (hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)) {
3557 /* Check for Link interrupts. If both Link Up/Down
3558 * bits are set, clear both and check adapter status
3559 */
3560 if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
3561 (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
3562 u64 temp64;
3563
3564 xge_debug_device(XGE_TRACE,
3565 "both link up and link down detected "XGE_OS_LLXFMT,
3566 (unsigned long long)val64);
3567
3568 temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
3569 XGE_HAL_MISC_INT_REG_LINK_UP_INT);
3570 xge_os_pio_mem_write64(hldev->pdev,
3571 hldev->regh0, temp64,
3572 &isrbar0->misc_int_reg);
3573 }
3574 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
3575 xge_debug_device(XGE_TRACE,
3576 "link up call request, misc_int "XGE_OS_LLXFMT,
3577 (unsigned long long)val64);
3578 __hal_device_handle_link_up_ind(hldev);
3579 }
3580 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
3581 xge_debug_device(XGE_TRACE,
3582 "link down request, misc_int "XGE_OS_LLXFMT,
3583 (unsigned long long)val64);
3584 __hal_device_handle_link_down_ind(hldev);
3585 }
3586 } else
3587 #endif
3588 {
3589 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3590 val64, &isrbar0->misc_int_reg);
3591 }
3592 }
3593
3594 return XGE_HAL_OK;
3595 }
3596
3597 /*
3598 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
3599 * @hldev: HAL device handle.
3600 * @reason: interrupt reason
3601 */
3602 xge_hal_status_e
3603 __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
3604 {
3605 xge_hal_status_e status = XGE_HAL_OK;
3606 xge_hal_pci_bar0_t *isrbar0 =
3607 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3608 volatile u64 val64;
3609
3610 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3611 &isrbar0->pic_int_status);
3612 if ( val64 & (XGE_HAL_PIC_INT_FLSH |
3613 XGE_HAL_PIC_INT_MDIO |
3614 XGE_HAL_PIC_INT_IIC |
3615 XGE_HAL_PIC_INT_MISC) ) {
3616 status = __hal_device_handle_pic(hldev, val64);
3617 xge_os_wmb();
3618 }
3619
3620 if (!(val64 & XGE_HAL_PIC_INT_TX))
3621 return status;
3622
3623 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3624 &isrbar0->txpic_int_reg);
3625 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3626 val64, &isrbar0->txpic_int_reg);
3627 xge_os_wmb();
3628
3629 if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
3630 int i;
3631
3632 if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
3633 g_xge_hal_driver->uld_callbacks.sched_timer(
3634 hldev, hldev->upper_layer_info);
3635 /*
3636 * This feature implements adaptive receive interrupt
3637 * coalescing. It is disabled by default. To enable it,
3638 * set hldev->config.rxufca_lo_lim to a value different from
3639 * hldev->config.rxufca_hi_lim.
3640 *
3641 * We are using the HW timer for this feature, so the
3642 * user needs to configure hldev->config.rxufca_lbolt_period,
3643 * which is essentially a time slice of that timer.
3644 *
3645 * For those familiar with Linux, lbolt means jiffies
3646 * of this timer, i.e. a timer tick.
3647 */
3648 if (hldev->config.rxufca_lo_lim !=
3649 hldev->config.rxufca_hi_lim &&
3650 hldev->config.rxufca_lo_lim != 0) {
3651 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3652 if (!hldev->config.ring.queue[i].configured)
3653 continue;
3654 if (hldev->config.ring.queue[i].rti.urange_a)
3655 __hal_update_rxufca(hldev, i);
3656 }
3657 }
3658
3659 /*
3660 * This feature implements adaptive TTI timer re-calculation
3661 * based on host utilization, the number of interrupts processed,
3662 * the number of RXDs per tick and the average packet length per
3663 * tick.
3664 */
3665 if (hldev->config.bimodal_interrupts) {
3666 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3667 if (!hldev->config.ring.queue[i].configured)
3668 continue;
3669 if (hldev->bimodal_tti[i].enabled)
3670 __hal_update_bimodal(hldev, i);
3671 }
3672 }
3673 }
3674
3675 return XGE_HAL_OK;
3676 }
3677
3678 /*
3679 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
3680 * @hldev: HAL device handle.
3681 * @reason: interrupt reason
3682 */
3683 xge_hal_status_e
3684 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
3685 {
3686 xge_hal_pci_bar0_t *isrbar0 =
3687 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3688 u64 val64, temp64, err;
3689
3690 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3691 &isrbar0->txdma_int_status);
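/*
 * For each TxDMA sub-block (PFC, TDA, PCC, TTI, LSO, TPA, SM) flagged
 * in txdma_int_status: read its error register, write the value back
 * to acknowledge the alarm (the error bits appear to be
 * write-1-to-clear), bump the per-block counter, and if any fatal bit
 * is set fall through to a full device reset/re-enable.
 */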
3692 if (val64 & XGE_HAL_TXDMA_PFC_INT) {
3693 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3694 &isrbar0->pfc_err_reg);
3695 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3696 err, &isrbar0->pfc_err_reg);
3697 hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
3698 temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
3699 |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
3700 |XGE_HAL_PFC_PCIX_ERR;
3701 if (val64 & temp64)
3702 goto reset;
3703 }
3704 if (val64 & XGE_HAL_TXDMA_TDA_INT) {
3705 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3706 &isrbar0->tda_err_reg);
3707 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3708 err, &isrbar0->tda_err_reg);
3709 hldev->stats.sw_dev_info_stats.tda_err_cnt++;
3710 temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
3711 |XGE_HAL_TDA_SM1_ERR_ALARM;
3712 if (val64 & temp64)
3713 goto reset;
3714 }
3715 if (val64 & XGE_HAL_TXDMA_PCC_INT) {
3716 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3717 &isrbar0->pcc_err_reg);
3718 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3719 err, &isrbar0->pcc_err_reg);
3720 hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
3721 temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
3722 |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
3723 |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
3724 |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
3725 |XGE_HAL_PCC_7_LSO_OV_ERR;
3726 if (val64 & temp64)
3727 goto reset;
3728 }
3729 if (val64 & XGE_HAL_TXDMA_TTI_INT) {
3730 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3731 &isrbar0->tti_err_reg);
3732 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3733 err, &isrbar0->tti_err_reg);
3734 hldev->stats.sw_dev_info_stats.tti_err_cnt++;
3735 temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
3736 if (val64 & temp64)
3737 goto reset;
3738 }
3739 if (val64 & XGE_HAL_TXDMA_LSO_INT) {
3740 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3741 &isrbar0->lso_err_reg);
3742 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3743 err, &isrbar0->lso_err_reg);
3744 hldev->stats.sw_dev_info_stats.lso_err_cnt++;
3745 temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
3746 |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
3747 if (val64 & temp64)
3748 goto reset;
3749 }
3750 if (val64 & XGE_HAL_TXDMA_TPA_INT) {
3751 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3752 &isrbar0->tpa_err_reg);
3753 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3754 err, &isrbar0->tpa_err_reg);
3755 hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
3756 temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
3757 if (val64 & temp64)
3758 goto reset;
3759 }
3760 if (val64 & XGE_HAL_TXDMA_SM_INT) {
3761 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3762 &isrbar0->sm_err_reg);
3763 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3764 err, &isrbar0->sm_err_reg);
3765 hldev->stats.sw_dev_info_stats.sm_err_cnt++;
3766 temp64 = XGE_HAL_SM_SM_ERR_ALARM;
3767 if (val64 & temp64)
3768 goto reset;
3769 }
3770
3771 return XGE_HAL_OK;
3772
3773 reset : (void) xge_hal_device_reset(hldev);
3774 (void) xge_hal_device_enable(hldev);
3775 xge_hal_device_intr_enable(hldev);
3776 return XGE_HAL_OK;
3777 }
3778
3779 /*
3780 * __hal_device_handle_txmac - Handle TxMAC interrupt reason
3781 * @hldev: HAL device handle.
3782 * @reason: interrupt reason
3783 */
3784 xge_hal_status_e
3785 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
3786 {
3787 xge_hal_pci_bar0_t *isrbar0 =
3788 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3789 u64 val64, temp64;
3790
3791 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3792 &isrbar0->mac_int_status);
3793 if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
3794 return XGE_HAL_OK;
3795
3796 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3797 &isrbar0->mac_tmac_err_reg);
3798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3799 val64, &isrbar0->mac_tmac_err_reg);
3800 hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;
3801 temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
3802 if (val64 & temp64) {
3803 (void) xge_hal_device_reset(hldev);
3804 (void) xge_hal_device_enable(hldev);
3805 xge_hal_device_intr_enable(hldev);
3806 }
3807
3808 return XGE_HAL_OK;
3809 }
3810
3811 /*
3812 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
3813 * @hldev: HAL device handle.
3814 * @reason: interrupt reason
3815 */
3816 xge_hal_status_e
3817 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
3818 {
3819 xge_hal_pci_bar0_t *isrbar0 =
3820 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3821 u64 val64, temp64;
3822
3823 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3824 &isrbar0->xgxs_int_status);
3825 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS))
3826 return XGE_HAL_OK;
3827
3828 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3829 &isrbar0->xgxs_txgxs_err_reg);
3830 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3831 val64, &isrbar0->xgxs_txgxs_err_reg);
3832 hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++;
3833 temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR;
3834 if (val64 & temp64) {
3835 (void) xge_hal_device_reset(hldev);
3836 (void) xge_hal_device_enable(hldev);
3837 xge_hal_device_intr_enable(hldev);
3838 }
3839
3840 return XGE_HAL_OK;
3841 }
3842
3843 /*
3844 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
3845 * @hldev: HAL device handle.
3846 * @reason: interrupt reason
3847 */
3848 xge_hal_status_e
3849 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
3850 {
3851 /* FIXME: handle register */
3852
3853 return XGE_HAL_OK;
3854 }
3855
3856 /*
3857 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
3858 * @hldev: HAL device handle.
3859 * @reason: interrupt reason
3860 */
3861 xge_hal_status_e
3862 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
3863 {
3864 xge_hal_pci_bar0_t *isrbar0 =
3865 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3866 u64 val64, err, temp64;
3867
3868 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3869 &isrbar0->rxdma_int_status);
3870 if (val64 & XGE_HAL_RXDMA_RC_INT) {
3871 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3872 &isrbar0->rc_err_reg);
3873 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3874 err, &isrbar0->rc_err_reg);
3875 hldev->stats.sw_dev_info_stats.rc_err_cnt++;
3876 temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
3877 |XGE_HAL_RC_PRCn_SM_ERR_ALARM
3878 |XGE_HAL_RC_FTC_SM_ERR_ALARM;
3879 if (val64 & temp64)
3880 goto reset;
3881 }
3882 if (val64 & XGE_HAL_RXDMA_RPA_INT) {
3883 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3884 &isrbar0->rpa_err_reg);
3885 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3886 err, &isrbar0->rpa_err_reg);
3887 hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
3888 temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
3889 if (val64 & temp64)
3890 goto reset;
3891 }
3892 if (val64 & XGE_HAL_RXDMA_RDA_INT) {
3893 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3894 &isrbar0->rda_err_reg);
3895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3896 err, &isrbar0->rda_err_reg);
3897 hldev->stats.sw_dev_info_stats.rda_err_cnt++;
3898 temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
3899 |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
3900 |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
3901 |XGE_HAL_RDA_RXD_ECC_DB_SERR;
3902 if (val64 & temp64)
3903 goto reset;
3904 }
3905 if (val64 & XGE_HAL_RXDMA_RTI_INT) {
3906 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3907 &isrbar0->rti_err_reg);
3908 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3909 err, &isrbar0->rti_err_reg);
3910 hldev->stats.sw_dev_info_stats.rti_err_cnt++;
3911 temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
3912 if (val64 & temp64)
3913 goto reset;
3914 }
3915
3916 return XGE_HAL_OK;
3917
3918 reset : (void) xge_hal_device_reset(hldev);
3919 (void) xge_hal_device_enable(hldev);
3920 xge_hal_device_intr_enable(hldev);
3921 return XGE_HAL_OK;
3922 }
3923
3924 /*
3925 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
3926 * @hldev: HAL device handle.
3927 * @reason: interrupt reason
3928 */
3929 xge_hal_status_e
3930 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
3931 {
3932 xge_hal_pci_bar0_t *isrbar0 =
3933 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3934 u64 val64, temp64;
3935
3936 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3937 &isrbar0->mac_int_status);
3938 if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
3939 return XGE_HAL_OK;
3940
3941 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3942 &isrbar0->mac_rmac_err_reg);
3943 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3944 val64, &isrbar0->mac_rmac_err_reg);
3945 hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;
3946 temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
3947 if (val64 & temp64) {
3948 (void) xge_hal_device_reset(hldev);
3949 (void) xge_hal_device_enable(hldev);
3950 xge_hal_device_intr_enable(hldev);
3951 }
3952
3953 return XGE_HAL_OK;
3954 }
3955
3956 /*
3957 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
3958 * @hldev: HAL device handle.
3959 * @reason: interrupt reason
3960 */
3961 xge_hal_status_e
3962 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
3963 {
3964 xge_hal_pci_bar0_t *isrbar0 =
3965 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3966 u64 val64, temp64;
3967
3968 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3969 &isrbar0->xgxs_int_status);
3970 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
3971 return XGE_HAL_OK;
3972
3973 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3974 &isrbar0->xgxs_rxgxs_err_reg);
3975 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3976 val64, &isrbar0->xgxs_rxgxs_err_reg);
3977 hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;
3978 temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
3979 if (val64 & temp64) {
3980 (void) xge_hal_device_reset(hldev);
3981 (void) xge_hal_device_enable(hldev);
3982 xge_hal_device_intr_enable(hldev);
3983 }
3984
3985 return XGE_HAL_OK;
3986 }
3987
3988 /**
3989 * xge_hal_device_enable - Enable device.
3990 * @hldev: HAL device handle.
3991 *
3992 * Enable the specified device: bring up the link/interface.
3993 * Returns: XGE_HAL_OK - success.
3994 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
3995 * to a "quiescent" state.
3996 *
3997 * See also: xge_hal_status_e{}.
3998 *
3999 * Usage: See ex_open{}.
4000 */
4001 xge_hal_status_e
4002 xge_hal_device_enable(xge_hal_device_t *hldev)
4003 {
4004 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4005 u64 val64;
4006 u64 adp_status;
4007 int i, j;
4008
4009 if (!hldev->hw_is_initialized) {
4010 xge_hal_status_e status;
4011
4012 status = __hal_device_hw_initialize(hldev);
4013 if (status != XGE_HAL_OK) {
4014 return status;
4015 }
4016 }
4017
4018 /*
4019 * Not needed in most cases, i.e.
4020 * when device_disable() is followed by reset -
4021 * the latter copies back PCI config space, along with
4022 * the bus mastership - see __hal_device_reset().
4023 * However, there are (or may be in the future) other cases,
4024 * and it does not hurt.
4025 */
4026 __hal_device_bus_master_enable(hldev);
4027
4028 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4029 /*
4030 * Configure the link stability period.
4031 */
4032 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4033 &bar0->misc_control);
4034 if (hldev->config.link_stability_period !=
4035 XGE_HAL_DEFAULT_USE_HARDCODE) {
4036
4037 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4038 hldev->config.link_stability_period);
4039 } else {
4040 /*
4041 * Use the default link stability period of 1 ms
4042 */
4043 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4044 XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
4045 }
4046 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4047 val64, &bar0->misc_control);
4048
4049 /*
4050 * Clearing any possible Link up/down interrupts that
4051 * could have popped up just before Enabling the card.
4052 */
4053 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4054 &bar0->misc_int_reg);
4055 if (val64) {
4056 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4057 val64, &bar0->misc_int_reg);
4058 xge_debug_device(XGE_TRACE, "%s","link state cleared");
4059 }
4060 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
4061 /*
4062 * Clearing any possible Link state change interrupts that
4063 * could have popped up just before Enabling the card.
4064 */
4065 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4066 &bar0->mac_rmac_err_reg);
4067 if (val64) {
4068 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4069 val64, &bar0->mac_rmac_err_reg);
4070 xge_debug_device(XGE_TRACE, "%s", "link state cleared");
4071 }
4072 }
4073
4074 if (__hal_device_wait_quiescent(hldev, &val64)) {
4075 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4076 }
4077
4078 /* Enabling Laser. */
4079 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4080 &bar0->adapter_control);
4081 val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
4082 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4083 &bar0->adapter_control);
4084
4085 /* let link establish */
4086 xge_os_mdelay(1);
4087
4088 /* set link down until the poll() routine sets it up (maybe) */
4089 hldev->link_state = XGE_HAL_LINK_DOWN;
4090
4091 /* If link is UP (adapter is connected) then enable the adapter */
4092 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4093 &bar0->adapter_status);
4094 if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4095 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
4096 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4097 &bar0->adapter_control);
4098 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
4099 } else {
4100 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4101 &bar0->adapter_control);
4102 val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
4103 XGE_HAL_ADAPTER_LED_ON );
4104 }
4105
4106 val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */
4107 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */
4108 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
4109 &bar0->adapter_control);
4110
4111 /* We spin here waiting for the Link to come up.
4112 * This is the fix for the Link being unstable after the reset. */
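/* Each iteration of the loop below takes roughly 1 ms. The link is
 * declared good only after link_valid_cnt consecutive polls show the
 * adapter enabled with neither a local nor a remote RMAC fault; any
 * faulty poll resets the count and re-asserts the laser and adapter
 * enable. The loop gives up after link_retry_cnt iterations. */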
4113 i = 0;
4114 j = 0;
4115 do
4116 {
4117 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4118 &bar0->adapter_status);
4119
4120 /* Read the adapter control register for Adapter_enable bit */
4121 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4122 &bar0->adapter_control);
4123 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4124 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
4125 (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
4126 j++;
4127 if (j >= hldev->config.link_valid_cnt) {
4128 if (xge_hal_device_status(hldev, &adp_status) ==
4129 XGE_HAL_OK) {
4130 if (__hal_verify_pcc_idle(hldev,
4131 adp_status) != XGE_HAL_OK) {
4132 return
4133 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4134 }
4135 xge_debug_device(XGE_TRACE,
4136 "adp_status: "XGE_OS_LLXFMT
4137 ", link is up on "
4138 "adapter enable!",
4139 (unsigned long long)adp_status);
4140 val64 = xge_os_pio_mem_read64(
4141 hldev->pdev,
4142 hldev->regh0,
4143 &bar0->adapter_control);
4144 val64 = val64|
4145 (XGE_HAL_ADAPTER_EOI_TX_ON |
4146 XGE_HAL_ADAPTER_LED_ON );
4147 xge_os_pio_mem_write64(hldev->pdev,
4148 hldev->regh0, val64,
4149 &bar0->adapter_control);
4150 xge_os_mdelay(1);
4151
4152 val64 = xge_os_pio_mem_read64(
4153 hldev->pdev,
4154 hldev->regh0,
4155 &bar0->adapter_control);
4156 break; /* out of the retry loop */
4157 } else {
4158 return
4159 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4160 }
4161 }
4162 } else {
4163 j = 0; /* Reset the count */
4164 /* Turn on the Laser */
4165 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4166 &bar0->adapter_control);
4167 val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
4168 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
4169 val64, &bar0->adapter_control);
4170
4171 xge_os_mdelay(1);
4172
4173 /* Now re-enable it, since the hardware may have
4174 * turned it off due to noise */
4175 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4176 &bar0->adapter_control);
4177 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
4178 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */
4179 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4180 &bar0->adapter_control);
4181 }
4182 xge_os_mdelay(1); /* Sleep for 1 msec */
4183 i++;
4184 } while (i < hldev->config.link_retry_cnt);
4185
4186 __hal_device_led_actifity_fix(hldev);
4187
4188 #ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
4189 /* Here we are performing a soft reset on the XGXS to force the link down.
4190 * Since the link is already up, we will get a link state change
4191 * poll notification after the adapter is enabled */
4192
4193 __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
4194 &bar0->dtx_control);
4195 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4196
4197 __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
4198 &bar0->dtx_control);
4199 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4200
4201 __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
4202 &bar0->dtx_control);
4203 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4204
4205 xge_os_mdelay(100); /* Sleep for 100 msec */
4206 #else
4207 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4208 #endif
4209 {
4210 /*
4211 * With some switches the link state change interrupt does not
4212 * occur even though the xgxs reset is done as per SPN-006. So,
4213 * poll the adapter status register and check if the link state
4214 * is ok.
4215 */
4216 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4217 &bar0->adapter_status);
4218 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4219 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
4220 {
4221 xge_debug_device(XGE_TRACE, "%s",
4222 "enable device causing link state change ind..");
4223 (void) __hal_device_handle_link_state_change(hldev);
4224 }
4225 }
4226
4227 if (hldev->config.stats_refresh_time_sec !=
4228 XGE_HAL_STATS_REFRESH_DISABLE)
4229 __hal_stats_enable(&hldev->stats);
4230
4231 return XGE_HAL_OK;
4232 }
4233
4234 /**
4235 * xge_hal_device_disable - Disable Xframe adapter.
4236 * @hldev: Device handle.
4237 *
4238 * Disable this device. To gracefully reset the adapter, the host should:
4239 *
4240 * - call xge_hal_device_disable();
4241 *
4242 * - call xge_hal_device_intr_disable();
4243 *
4244 * - close all opened channels and clean up outstanding resources;
4245 *
4246 * - do some work (error recovery, change mtu, reset, etc);
4247 *
4248 * - call xge_hal_device_enable();
4249 *
4250 * - open channels, replenish RxDs, etc.
4251 *
4252 * - call xge_hal_device_intr_enable().
4253 *
4254 * Note: Disabling the device does _not_ include disabling of interrupts.
4255 * After disabling, the device stops receiving new frames, but frames
4256 * that were already in the pipe will keep arriving for a few milliseconds.
4257 *
4258 * Returns: XGE_HAL_OK - success.
4259 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
4260 * a "quiescent" state.
4261 *
4262 * See also: xge_hal_status_e{}.
4263 */
4264 xge_hal_status_e
4265 xge_hal_device_disable(xge_hal_device_t *hldev)
4266 {
4267 xge_hal_status_e status = XGE_HAL_OK;
4268 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4269 u64 val64;
4270
4271 xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");
4272
4273 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4274 &bar0->adapter_control);
4275 val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
4276 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4277 &bar0->adapter_control);
4278
4279 if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
4280 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4281 }
4282
4283 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
4284 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
4285 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4286 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
4287 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4288 }
4289
4290 if (hldev->config.stats_refresh_time_sec !=
4291 XGE_HAL_STATS_REFRESH_DISABLE)
4292 __hal_stats_disable(&hldev->stats);
4293 #ifdef XGE_DEBUG_ASSERT
4294 else
4295 xge_assert(!hldev->stats.is_enabled);
4296 #endif
4297
4298 #ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
4299 __hal_device_bus_master_disable(hldev);
4300 #endif
4301
4302 return status;
4303 }
4304
4305 /**
4306 * xge_hal_device_reset - Reset device.
4307 * @hldev: HAL device handle.
4308 *
4309 * Soft-reset the device, reset the device stats except reset_cnt.
4310 *
4311 * After the reset is done, HAL will try to re-initialize the HW.
4312 *
4313 * Returns: XGE_HAL_OK - success.
4314 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
4315 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
4316 *
4317 * See also: xge_hal_status_e{}.
4318 */
4319 xge_hal_status_e
4320 xge_hal_device_reset(xge_hal_device_t *hldev)
4321 {
4322 xge_hal_status_e status;
4323
4324 /* increment the soft reset counter */
4325 u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
4326
4327 xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
4328
4329 if (!hldev->is_initialized)
4330 return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
4331
4332 /* actual "soft" reset of the adapter */
4333 status = __hal_device_reset(hldev);
4334
4335 /* reset all stats including saved */
4336 __hal_stats_soft_reset(hldev, 1);
4337
4338 /* increment reset counter */
4339 hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;
4340
4341 /* re-initialize rxufca_intr_thres */
4342 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
4343
4344 hldev->reset_needed_after_close = 0;
4345
4346 return status;
4347 }
4348
4349 /**
4350 * xge_hal_device_status - Check whether Xframe hardware is ready for
4351 * operation.
4352 * @hldev: HAL device handle.
4353 * @hw_status: Xframe status register. Returned by HAL.
4354 *
4355 * Check whether Xframe hardware is ready for operation.
4356 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest of
4357 * the hardware functional blocks.
4358 *
4359 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
4360 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
4361 *
4362 * See also: xge_hal_status_e{}.
4363 * Usage: See ex_open{}.
4364 */
4365 xge_hal_status_e
4366 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
4367 {
4368 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4369 u64 tmp64;
4370
4371 tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4372 &bar0->adapter_status);
4373
4374 *hw_status = tmp64;
4375
4376 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
4377 xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
4378 return XGE_HAL_FAIL;
4379 }
4380 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
4381 xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
4382 return XGE_HAL_FAIL;
4383 }
4384 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
4385 xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
4386 return XGE_HAL_FAIL;
4387 }
4388 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
4389 xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
4390 return XGE_HAL_FAIL;
4391 }
4392 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
4393 xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
4394 return XGE_HAL_FAIL;
4395 }
4396 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
4397 xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
4398 return XGE_HAL_FAIL;
4399 }
4400 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
4401 xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
4402 return XGE_HAL_FAIL;
4403 }
4404 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
4405 xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
4406 return XGE_HAL_FAIL;
4407 }
4408 #ifndef XGE_HAL_HERC_EMULATION
4409 /*
4410 * Andrew: in PCI 33 MHz mode, the P_PLL is not used, and therefore
4411 * the P_PLL_LOCK bit in the adapter_status register will
4412 * not be asserted.
4413 */
4414 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
4415 xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
4416 hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
4417 xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
4418 return XGE_HAL_FAIL;
4419 }
4420 #endif
4421
4422 return XGE_HAL_OK;
4423 }
4424
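/*
 * __hal_device_msi_intr_endis - Enable/disable MSI at the PCI level.
 * @hldev: HAL device handle.
 * @flag: 1 - enable, 0 - disable.
 *
 * Toggles the MSI Enable bit (bit 0) of the MSI capability's message
 * control word in PCI configuration space.
 */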
4425 void
4426 __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
4427 {
4428 u16 msi_control_reg;
4429
4430 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
4431 xge_offsetof(xge_hal_pci_config_le_t,
4432 msi_control), &msi_control_reg);
4433
4434 if (flag)
4435 msi_control_reg |= 0x1;
4436 else
4437 msi_control_reg &= ~0x1;
4438
4439 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
4440 xge_offsetof(xge_hal_pci_config_le_t,
4441 msi_control), msi_control_reg);
4442 }
4443
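/*
 * __hal_device_msix_intr_endis - Mask/unmask a channel's MSI-X vector.
 * @hldev: HAL device handle.
 * @channel: Channel that owns the MSI-X vector.
 * @flag: 1 - unmask (enable), 0 - mask (disable).
 *
 * xmsi_mask_reg holds one mask bit per vector, with vector n mapped to
 * bit (63 - n); enabling clears the bit, disabling sets it.
 */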
4444 void
4445 __hal_device_msix_intr_endis(xge_hal_device_t *hldev,
4446 xge_hal_channel_t *channel, int flag)
4447 {
4448 u64 val64;
4449 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
4450
4451 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4452 &bar0->xmsi_mask_reg);
4453
4454 if (flag)
4455 val64 &= ~(1LL << ( 63 - channel->msix_idx ));
4456 else
4457 val64 |= (1LL << ( 63 - channel->msix_idx ));
4458 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4459 &bar0->xmsi_mask_reg);
4460 }
4461
4462 /**
4463 * xge_hal_device_intr_enable - Enable Xframe interrupts.
4464 * @hldev: HAL device handle.
4465 *
4466 * Enable Xframe interrupts. The function is to be executed last in the
4467 * Xframe initialization sequence.
4470 *
4471 * See also: xge_hal_device_intr_disable()
4472 */
4473 void
4474 xge_hal_device_intr_enable(xge_hal_device_t *hldev)
4475 {
4476 xge_list_t *item;
4477 u64 val64;
4478
4479 /* PRC initialization and configuration */
4480 xge_list_for_each(item, &hldev->ring_channels) {
4481 xge_hal_channel_h channel;
4482 channel = xge_container_of(item, xge_hal_channel_t, item);
4483 __hal_ring_prc_enable(channel);
4484 }
4485
4486 /* enable traffic only interrupts */
4487 if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
4488 /*
4489 * make sure all interrupts are going to be disabled if MSI
4490 * is enabled.
4491 */
4492 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
4493 __hal_device_intr_mgmt(hldev, XGE_HAL_TX_PIC_INTR, 1);
4494 #else
4495 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
4496 #endif
4497 } else {
4498 /*
4499 * Enable the Tx traffic interrupts only if the TTI feature is
4500 * enabled.
4501 */
4502 val64 = 0;
4503 if (hldev->tti_enabled)
4504 val64 = XGE_HAL_TX_TRAFFIC_INTR;
4505
4506 if (!hldev->config.bimodal_interrupts)
4507 val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4508
4509 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4510 val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4511
4512 val64 |= XGE_HAL_TX_PIC_INTR |
4513 XGE_HAL_MC_INTR |
4514 XGE_HAL_TX_DMA_INTR |
4515 (hldev->config.sched_timer_us !=
4516 XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
4517 __hal_device_intr_mgmt(hldev, val64, 1);
4518 }
4519
4520 /*
4521 * Enable MSI-X interrupts
4522 */
4523 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4524
4525 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4526 /*
4527 * To enable MSI-X, MSI also needs to be enabled,
4528 * due to a bug in the herc NIC.
4529 */
4530 __hal_device_msi_intr_endis(hldev, 1);
4531 }
4532
4533
4534 /* Enable the MSI-X interrupt for each configured channel */
4535 xge_list_for_each(item, &hldev->fifo_channels) {
4536 xge_hal_channel_t *channel;
4537
4538 channel = xge_container_of(item,
4539 xge_hal_channel_t, item);
4540
4541 /* 0 vector is reserved for alarms */
4542 if (!channel->msix_idx)
4543 continue;
4544
4545 __hal_device_msix_intr_endis(hldev, channel, 1);
4546 }
4547
4548 xge_list_for_each(item, &hldev->ring_channels) {
4549 xge_hal_channel_t *channel;
4550
4551 channel = xge_container_of(item,
4552 xge_hal_channel_t, item);
4553
4554 /* 0 vector is reserved for alarms */
4555 if (!channel->msix_idx)
4556 continue;
4557
4558 __hal_device_msix_intr_endis(hldev, channel, 1);
4559 }
4560 }
4561
4562 xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
4563 }
4564
4565
4566 /**
4567 * xge_hal_device_intr_disable - Disable Xframe interrupts.
4568 * @hldev: HAL device handle.
4571 *
4572 * Disable Xframe interrupts.
4573 *
4574 * See also: xge_hal_device_intr_enable()
4575 */
4576 void
4577 xge_hal_device_intr_disable(xge_hal_device_t *hldev)
4578 {
4579 xge_list_t *item;
4580 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4581 u64 val64;
4582
4583 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4584
4585 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4586 /*
4587 * To disable MSI-X, MSI also needs to be disabled,
4588 * due to a bug in the herc NIC.
4589 */
4590 __hal_device_msi_intr_endis(hldev, 0);
4591 }
4592
4593 /* Disable the MSI-X interrupt for each configured channel */
4594 xge_list_for_each(item, &hldev->fifo_channels) {
4595 xge_hal_channel_t *channel;
4596
4597 channel = xge_container_of(item,
4598 xge_hal_channel_t, item);
4599
4600 /* 0 vector is reserved for alarms */
4601 if (!channel->msix_idx)
4602 continue;
4603
4604 __hal_device_msix_intr_endis(hldev, channel, 0);
4605
4606 }
4607
4608 xge_os_pio_mem_write64(hldev->pdev,
4609 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4610 &bar0->tx_traffic_mask);
4611
4612 xge_list_for_each(item, &hldev->ring_channels) {
4613 xge_hal_channel_t *channel;
4614
4615 channel = xge_container_of(item,
4616 xge_hal_channel_t, item);
4617
4618 /* 0 vector is reserved for alarms */
4619 if (!channel->msix_idx)
4620 continue;
4621
4622 __hal_device_msix_intr_endis(hldev, channel, 0);
4623 }
4624
4625 xge_os_pio_mem_write64(hldev->pdev,
4626 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4627 &bar0->rx_traffic_mask);
4628 }
4629
4630 /*
4631 * Disable traffic only interrupts.
4632 * Tx traffic interrupts are used only if the TTI feature is
4633 * enabled.
4634 */
4635 val64 = 0;
4636 if (hldev->tti_enabled)
4637 val64 = XGE_HAL_TX_TRAFFIC_INTR;
4638
4639 val64 |= XGE_HAL_RX_TRAFFIC_INTR |
4640 XGE_HAL_TX_PIC_INTR |
4641 XGE_HAL_MC_INTR |
4642 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
4643 XGE_HAL_SCHED_INTR : 0);
4644 __hal_device_intr_mgmt(hldev, val64, 0);
4645
4646 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4647 0xFFFFFFFFFFFFFFFFULL,
4648 &bar0->general_int_mask);
4649
4650
4651 /* disable all configured PRCs */
4652 xge_list_for_each(item, &hldev->ring_channels) {
4653 xge_hal_channel_h channel;
4654 channel = xge_container_of(item, xge_hal_channel_t, item);
4655 __hal_ring_prc_disable(channel);
4656 }
4657
4658 xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
4659 }
4660
4661
4662 /**
4663 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
4664 * @hldev: HAL device handle.
4665 *
4666 * Enable Xframe multicast addresses.
4667 * Returns: XGE_HAL_OK on success.
4668 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
4669 * feature within the time(timeout).
4670 *
4671 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
4672 */
4673 xge_hal_status_e
4674 xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
4675 {
4676 u64 val64;
4677 xge_hal_pci_bar0_t *bar0;
4678 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4679
4680 if (hldev == NULL)
4681 return XGE_HAL_ERR_INVALID_DEVICE;
4682
4683 if (hldev->mcast_refcnt)
4684 return XGE_HAL_OK;
4685
4686 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4687 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4688
4689 hldev->mcast_refcnt = 1;
4690
4691 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4692
4693 /* Enable all Multicast addresses */
4694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4695 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
4696 &bar0->rmac_addr_data0_mem);
4697 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4698 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
4699 &bar0->rmac_addr_data1_mem);
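/*
 * The data0/data1 pair written above forms the DA/mask entry for the
 * all-multicast slot. Presumably only the address bits cleared in the
 * mask are compared, so with mask 0xfeffffffffff just the group
 * (multicast) bit of the DA is matched against the 0x01 in data0.
 */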
4700 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4701 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4702 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4703 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4704 &bar0->rmac_addr_cmd_mem);
4705
4706 if (__hal_device_register_poll(hldev,
4707 &bar0->rmac_addr_cmd_mem, 0,
4708 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4709 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4710 /* upper layer may require to repeat */
4711 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4712 }
4713
4714 return XGE_HAL_OK;
4715 }
4716
4717 /**
4718 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
4719 * @hldev: HAL device handle.
4720 *
4721 * Disable Xframe multicast addresses.
4722 * Returns: XGE_HAL_OK - success.
4723 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
4724 * feature within the time(timeout).
4725 *
4726 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
4727 */
4728 xge_hal_status_e
4729 xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
4730 {
4731 u64 val64;
4732 xge_hal_pci_bar0_t *bar0;
4733 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4734
4735 if (hldev == NULL)
4736 return XGE_HAL_ERR_INVALID_DEVICE;
4737
4738 if (hldev->mcast_refcnt == 0)
4739 return XGE_HAL_OK;
4740
4741 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4742 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4743
4744 hldev->mcast_refcnt = 0;
4745
4746 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4747
4748 /* Disable all Multicast addresses */
4749 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4750 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
4751 &bar0->rmac_addr_data0_mem);
4752 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4753 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
4754 &bar0->rmac_addr_data1_mem);
4755
4756 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4757 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4758 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4759 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4760 &bar0->rmac_addr_cmd_mem);
4761
4762 if (__hal_device_register_poll(hldev,
4763 &bar0->rmac_addr_cmd_mem, 0,
4764 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4765 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4766 /* upper layer may require to repeat */
4767 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4768 }
4769
4770 return XGE_HAL_OK;
4771 }
4772
4773 /**
4774 * xge_hal_device_promisc_enable - Enable promiscuous mode.
4775 * @hldev: HAL device handle.
4776 *
4777 * Enable promiscuous mode of Xframe operation.
4778 *
4779 * See also: xge_hal_device_promisc_disable().
4780 */
4781 void
4782 xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
4783 {
4784 u64 val64;
4785 xge_hal_pci_bar0_t *bar0;
4786
4787 xge_assert(hldev);
4788
4789 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4790
4791 if (!hldev->is_promisc) {
4792 /* Put the NIC into promiscuous mode */
4793 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4794 &bar0->mac_cfg);
4795 val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
4796
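/*
 * mac_cfg is a protected register: the RMAC_CFG_KEY write below is
 * believed to unlock it for the single 32-bit update that follows,
 * which is why only the upper half of mac_cfg is rewritten here.
 */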
4797 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4798 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4799 &bar0->rmac_cfg_key);
4800
4801 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4802 (u32)(val64 >> 32),
4803 &bar0->mac_cfg);
4804
4805 hldev->is_promisc = 1;
4806 xge_debug_device(XGE_TRACE,
4807 "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
4808 (unsigned long long)val64);
4809 }
4810 }
4811
4812 /**
4813 * xge_hal_device_promisc_disable - Disable promiscuous mode.
4814 * @hldev: HAL device handle.
4815 *
4816 * Disable promiscuous mode of Xframe operation.
4817 *
4818 * See also: xge_hal_device_promisc_enable().
4819 */
4820 void
4821 xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
4822 {
4823 u64 val64;
4824 xge_hal_pci_bar0_t *bar0;
4825
4826 xge_assert(hldev);
4827
4828 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4829
4830 if (hldev->is_promisc) {
4831 /* Remove the NIC from promiscuous mode */
4832 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4833 &bar0->mac_cfg);
4834 val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
4835
4836 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4837 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4838 &bar0->rmac_cfg_key);
4839
4840 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4841 (u32)(val64 >> 32),
4842 &bar0->mac_cfg);
4843
4844 hldev->is_promisc = 0;
4845 xge_debug_device(XGE_TRACE,
4846 "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
4847 (unsigned long long)val64);
4848 }
4849 }
4850
4851 /**
4852 * xge_hal_device_macaddr_get - Get MAC addresses.
4853 * @hldev: HAL device handle.
4854 * @index: MAC address index, in the range from 0 to
4855 * XGE_HAL_MAX_MAC_ADDRESSES.
4856 * @macaddr: MAC address. Returned by HAL.
4857 *
4858 * Retrieve one of the stored MAC addresses by reading non-volatile
4859 * memory on the chip.
4860 *
4861 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
4862 *
4863 * Returns: XGE_HAL_OK - success.
4864 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
4865 * address within the time(timeout).
4866 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4867 *
4868 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
4869 */
4870 xge_hal_status_e
4871 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
4872 macaddr_t *macaddr)
4873 {
4874 xge_hal_pci_bar0_t *bar0;
4875 u64 val64;
4876 int i;
4877
4878 if (hldev == NULL) {
4879 return XGE_HAL_ERR_INVALID_DEVICE;
4880 }
4881
4882 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4883
4884 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
4885 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4886 }
4887
4888 #ifdef XGE_HAL_HERC_EMULATION
4889 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
4890 &bar0->rmac_addr_data0_mem);
4891 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
4892 &bar0->rmac_addr_data1_mem);
4893 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4894 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4895 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
4896 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4897 &bar0->rmac_addr_cmd_mem);
4898
4899 /* poll until done */
4900 __hal_device_register_poll(hldev,
4901 &bar0->rmac_addr_cmd_mem, 0,
4902 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
4903 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
4904
4905 #endif
4906
4907 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4908 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4909 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4910 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4911 &bar0->rmac_addr_cmd_mem);
4912
4913 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4914 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4915 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4916 /* upper layer may require to repeat */
4917 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4918 }
4919
4920 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4921 &bar0->rmac_addr_data0_mem);
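/*
 * The MAC address occupies the top 48 bits of rmac_addr_data0_mem:
 * byte i is taken from bits (63 - 8*i) down to (56 - 8*i), so e.g. a
 * register value of 0x0102030405060000ULL yields 01:02:03:04:05:06.
 */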
4922 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4923 (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
4924 }
4925
4926 #ifdef XGE_HAL_HERC_EMULATION
4927 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4928 (*macaddr)[i] = (u8)0;
4929 }
4930 (*macaddr)[1] = (u8)1;
4931
4932 #endif
4933
4934 return XGE_HAL_OK;
4935 }
4936
4937 /**
4938 * xge_hal_device_macaddr_set - Set MAC address.
4939 * @hldev: HAL device handle.
4940 * @index: MAC address index, in the range from 0 to
4941 * XGE_HAL_MAX_MAC_ADDRESSES.
4942 * @macaddr: New MAC address to configure.
4943 *
4944 * Configure one of the available MAC address "slots".
4945 *
4946 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
4947 *
4948 * Returns: XGE_HAL_OK - success.
4949 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
4950 * address within the time(timeout).
4951 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4952 *
4953 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
4954 */
4955 xge_hal_status_e
4956 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
4957 macaddr_t macaddr)
4958 {
4959 xge_hal_pci_bar0_t *bar0 =
4960 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4961 u64 val64, temp64;
4962 int i;
4963
4964 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
4965 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4966
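/*
 * Pack the six address bytes into the low 48 bits of temp64, first
 * byte most significant, before handing the result to
 * XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(). For example, 01:02:03:04:05:06
 * packs to 0x0000010203040506ULL.
 */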
4967 temp64 = 0;
4968 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4969 temp64 |= macaddr[i];
4970 temp64 <<= 8;
4971 }
4972 temp64 >>= 8;
4973
4974 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4975 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
4976 &bar0->rmac_addr_data0_mem);
4977
4978 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4979 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4980 &bar0->rmac_addr_data1_mem);
4981
4982 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4983 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4984 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4985
4986 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4987 &bar0->rmac_addr_cmd_mem);
4988
4989 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4990 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4991 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4992 /* upper layer may require to repeat */
4993 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4994 }
4995
4996 return XGE_HAL_OK;
4997 }
4998
4999 /**
5000 * xge_hal_device_macaddr_clear - Clear MAC address.
5001 * @hldev: HAL device handle.
5002 * @index: MAC address index, in the range from 0 to
5003 * XGE_HAL_MAX_MAC_ADDRESSES.
5004 *
5005 * Clear one of the available MAC address "slots".
5006 *
5007 * Returns: XGE_HAL_OK - success.
5008 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
5009 * address within the time(timeout).
5010 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
5011 *
5012 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
5013 */
5014 xge_hal_status_e
5015 xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
5016 {
5017 xge_hal_status_e status;
5018 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
5019
5020 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
5021 if (status != XGE_HAL_OK) {
5022 xge_debug_device(XGE_ERR, "%s",
5023 "Not able to set the mac addr");
5024 return status;
5025 }
5026
5027 return XGE_HAL_OK;
5028 }
5029
5030 /**
5031 * xge_hal_device_macaddr_find - Finds index in the rmac table.
5032 * @hldev: HAL device handle.
5033 * @wanted: Wanted MAC address.
5034 *
5035 * See also: xge_hal_device_macaddr_set().
5036 */
5037 int
5038 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
5039 {
5040 int i;
5041 macaddr_t macaddr;
5042
5043 if (hldev == NULL) {
5044 return XGE_HAL_ERR_INVALID_DEVICE;
5045 }
5046
5047 for (i=0; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
5048 (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
5049 if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
5050 return i;
5051 }
5052 }
5053
5054 return -1;
5055 }
5056
5057 /**
5058 * xge_hal_device_mtu_set - Set MTU.
5059 * @hldev: HAL device handle.
5060 * @new_mtu: New MTU size to configure.
5061 *
5062 * Set new MTU value. For example, to use jumbo frames:
5063 * xge_hal_device_mtu_set(my_device, 9600);
5064 *
5065 * Returns: XGE_HAL_OK on success.
5066 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
5067 * register.
5068 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
5069 * schemes.
5070 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
5071 * a "quiescent" state.
5072 */
5073 xge_hal_status_e
5074 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
5075 {
5076 xge_hal_status_e status;
5077
5078 /*
5079 * a reset is needed if 1) the new MTU differs, and
5080 * 2a) the device was closed, or
5081 * 2b) the device is being upped for the first time.
5082 */
5083 if (hldev->config.mtu != new_mtu) {
5084 if (hldev->reset_needed_after_close ||
5085 !hldev->mtu_first_time_set) {
5086 status = xge_hal_device_reset(hldev);
5087 if (status != XGE_HAL_OK) {
5088 xge_debug_device(XGE_TRACE, "%s",
5089 "fatal: can not reset the device");
5090 return status;
5091 }
5092 }
5093 /* store the new MTU in device, reset will use it */
5094 hldev->config.mtu = new_mtu;
5095 xge_debug_device(XGE_TRACE, "new MTU %d applied",
5096 new_mtu);
5097 }
5098
5099 if (!hldev->mtu_first_time_set)
5100 hldev->mtu_first_time_set = 1;
5101
5102 return XGE_HAL_OK;
5103 }
5104
5105 /**
5106 * xge_hal_device_initialize - Initialize Xframe device.
5107 * @hldev: HAL device handle.
5108 * @attr: pointer to xge_hal_device_attr_t structure
5109 * @device_config: Configuration to be _applied_ to the device,
5110 * For the Xframe configuration "knobs" please
5111 * refer to xge_hal_device_config_t and Xframe
5112 * User Guide.
5113 *
5114 * Initialize Xframe device. Note that all the arguments of this public API
5115 * are 'IN', including @hldev. The upper-layer driver (ULD) cooperates with
5116 * the OS to find a new Xframe device and locate its PCI and memory spaces.
5117 *
5118 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL
5119 * to enable the latter to perform Xframe hardware initialization.
5120 *
5121 * Returns: XGE_HAL_OK - success.
5122 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
5123 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
5124 * valid.
5125 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
5126 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
5127 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address is not valid.
5128 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
5129 * address within the time (timeout), or TTI/RTI initialization failed.
5130 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
5131 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Device is not quiescent.
5132 *
5133 * See also: xge_hal_device_terminate(), xge_hal_status_e{}
5134 * xge_hal_device_attr_t{}.
5135 */
5136 xge_hal_status_e
xge_hal_device_initialize(xge_hal_device_t * hldev,xge_hal_device_attr_t * attr,xge_hal_device_config_t * device_config)5137 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
5138 xge_hal_device_config_t *device_config)
5139 {
5140 int i;
5141 xge_hal_status_e status;
5142 xge_hal_channel_t *channel;
5143 u16 subsys_device;
5144 u16 subsys_vendor;
5145 int total_dram_size, ring_auto_dram_cfg, left_dram_size;
5146 int total_dram_size_max = 0;
5147
5148 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
5149 (unsigned long long)(ulong_t)hldev);
5150
5151 /* sanity check */
5152 if (g_xge_hal_driver == NULL ||
5153 !g_xge_hal_driver->is_initialized) {
5154 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
5155 }
5156
5157 xge_os_memzero(hldev, sizeof(xge_hal_device_t));
5158
5159 /*
5160 * validate a common part of Xframe-I/II configuration
5161 * (and run check_card() later, once PCI is initialized - see below)
5162 */
5163 status = __hal_device_config_check_common(device_config);
5164 if (status != XGE_HAL_OK)
5165 return status;
5166
5167 /* apply config */
5168 xge_os_memcpy(&hldev->config, device_config,
5169 sizeof(xge_hal_device_config_t));
5170
5171 /* save original attr */
5172 xge_os_memcpy(&hldev->orig_attr, attr,
5173 sizeof(xge_hal_device_attr_t));
5174
5175 /* initialize rxufca_intr_thres */
5176 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
5177
5178 hldev->regh0 = attr->regh0;
5179 hldev->regh1 = attr->regh1;
5180 hldev->regh2 = attr->regh2;
5181 hldev->isrbar0 = hldev->bar0 = attr->bar0;
5182 hldev->bar1 = attr->bar1;
5183 hldev->bar2 = attr->bar2;
5184 hldev->pdev = attr->pdev;
5185 hldev->irqh = attr->irqh;
5186 hldev->cfgh = attr->cfgh;
5187
5188 /* set initial bimodal timer for bimodal adaptive schema */
5189 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
5190
5191 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
5192 g_xge_hal_driver->config.queue_size_initial,
5193 g_xge_hal_driver->config.queue_size_max,
5194 __hal_device_event_queued, hldev);
5195 if (hldev->queueh == NULL)
5196 return XGE_HAL_ERR_OUT_OF_MEMORY;
5197
5198 hldev->magic = XGE_HAL_MAGIC;
5199
5200 xge_assert(hldev->regh0);
5201 xge_assert(hldev->regh1);
5202 xge_assert(hldev->bar0);
5203 xge_assert(hldev->bar1);
5204 xge_assert(hldev->pdev);
5205 xge_assert(hldev->irqh);
5206 xge_assert(hldev->cfgh);
5207
5208 /* initialize some PCI/PCI-X fields of this PCI device. */
5209 __hal_device_pci_init(hldev);
5210
5211 /*
5212 * initialize lists to properly handle a potential
5213 * terminate request
5214 */
5215 xge_list_init(&hldev->free_channels);
5216 xge_list_init(&hldev->fifo_channels);
5217 xge_list_init(&hldev->ring_channels);
5218
5219 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
5220 /* fixups for xena */
5221 hldev->config.rth_en = 0;
5222 hldev->config.rth_spdm_en = 0;
5223 hldev->config.rts_mac_en = 0;
5224 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA;
5225
5226 status = __hal_device_config_check_xena(device_config);
5227 if (status != XGE_HAL_OK) {
5228 xge_hal_device_terminate(hldev);
5229 return status;
5230 }
5231 if (hldev->config.bimodal_interrupts == 1) {
5232 xge_hal_device_terminate(hldev);
5233 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
5234 } else if (hldev->config.bimodal_interrupts ==
5235 XGE_HAL_DEFAULT_USE_HARDCODE)
5236 hldev->config.bimodal_interrupts = 0;
5237 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
5238 /* fixups for herc */
5239 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
5240 status = __hal_device_config_check_herc(device_config);
5241 if (status != XGE_HAL_OK) {
5242 xge_hal_device_terminate(hldev);
5243 return status;
5244 }
5245 if (hldev->config.bimodal_interrupts ==
5246 XGE_HAL_DEFAULT_USE_HARDCODE)
5247 hldev->config.bimodal_interrupts = 1;
5248 } else {
5249 xge_debug_device(XGE_ERR,
5250 "detected unknown device_id 0x%x", hldev->device_id);
5251 xge_hal_device_terminate(hldev);
5252 return XGE_HAL_ERR_BAD_DEVICE_ID;
5253 }
5254
5255
5256 /* allocate and initialize FIFO types of channels according to
5257 * configuration */
5258 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
5259 if (!device_config->fifo.queue[i].configured)
5260 continue;
5261
5262 channel = __hal_channel_allocate(hldev, i,
5263 XGE_HAL_CHANNEL_TYPE_FIFO);
5264 if (channel == NULL) {
5265 xge_debug_device(XGE_ERR,
5266 "fifo: __hal_channel_allocate failed");
5267 xge_hal_device_terminate(hldev);
5268 return XGE_HAL_ERR_OUT_OF_MEMORY;
5269 }
5270 /* add new channel to the device */
5271 xge_list_insert(&channel->item, &hldev->free_channels);
5272 }
5273
5274 /*
5275 * automatic DRAM adjustment
5276 */
5277 total_dram_size = 0;
5278 ring_auto_dram_cfg = 0;
5279 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5280 if (!device_config->ring.queue[i].configured)
5281 continue;
5282 if (device_config->ring.queue[i].dram_size_mb ==
5283 XGE_HAL_DEFAULT_USE_HARDCODE) {
5284 ring_auto_dram_cfg++;
5285 continue;
5286 }
5287 total_dram_size += device_config->ring.queue[i].dram_size_mb;
5288 }
5289 left_dram_size = total_dram_size_max - total_dram_size;
5290 if (left_dram_size < 0 ||
5291 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) {
5292 xge_debug_device(XGE_ERR,
5293 "ring config: exceeded DRAM size %d MB",
5294 total_dram_size_max);
5295 xge_hal_device_terminate(hldev);
5296 return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
5297 }
5298
5299 /*
5300 * allocate and initialize RING types of channels according to
5301 * configuration
5302 */
5303 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5304 if (!device_config->ring.queue[i].configured)
5305 continue;
5306
5307 if (device_config->ring.queue[i].dram_size_mb ==
5308 XGE_HAL_DEFAULT_USE_HARDCODE) {
5309 hldev->config.ring.queue[i].dram_size_mb =
5310 device_config->ring.queue[i].dram_size_mb =
5311 left_dram_size / ring_auto_dram_cfg;
5312 }
5313
5314 channel = __hal_channel_allocate(hldev, i,
5315 XGE_HAL_CHANNEL_TYPE_RING);
5316 if (channel == NULL) {
5317 xge_debug_device(XGE_ERR,
5318 "ring: __hal_channel_allocate failed");
5319 xge_hal_device_terminate(hldev);
5320 return XGE_HAL_ERR_OUT_OF_MEMORY;
5321 }
5322 /* add new channel to the device */
5323 xge_list_insert(&channel->item, &hldev->free_channels);
5324 }
5325
5326 /* get subsystem IDs */
5327 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5328 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
5329 &subsys_device);
5330 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5331 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
5332 &subsys_vendor);
5333 xge_debug_device(XGE_TRACE,
5334 "subsystem_id %04x:%04x",
5335 subsys_vendor, subsys_device);
5336
5337 /* reset device initially */
5338 (void) __hal_device_reset(hldev);
5339
5340 /* set host endianness first, to ensure proper register access */
5341 status = __hal_device_set_swapper(hldev);
5342 if (status != XGE_HAL_OK) {
5343 xge_debug_device(XGE_ERR,
5344 "__hal_device_set_swapper failed");
5345 xge_hal_device_terminate(hldev);
5346 (void) __hal_device_reset(hldev);
5347 return status;
5348 }
5349
5350 #ifndef XGE_HAL_HERC_EMULATION
5351 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
5352 __hal_device_xena_fix_mac(hldev);
5353 #endif
5354
5355 /* MAC address initialization.
5356 * For now only one mac address will be read and used. */
5357 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
5358 if (status != XGE_HAL_OK) {
5359 xge_debug_device(XGE_ERR,
5360 "xge_hal_device_macaddr_get failed");
5361 xge_hal_device_terminate(hldev);
5362 return status;
5363 }
5364
5365 if (hldev->macaddr[0][0] == 0xFF &&
5366 hldev->macaddr[0][1] == 0xFF &&
5367 hldev->macaddr[0][2] == 0xFF &&
5368 hldev->macaddr[0][3] == 0xFF &&
5369 hldev->macaddr[0][4] == 0xFF &&
5370 hldev->macaddr[0][5] == 0xFF) {
5371 xge_debug_device(XGE_ERR,
5372 "xge_hal_device_macaddr_get returns all FFs");
5373 xge_hal_device_terminate(hldev);
5374 return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
5375 }
5376
5377 xge_debug_device(XGE_TRACE,
5378 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
5379 hldev->macaddr[0][0], hldev->macaddr[0][1],
5380 hldev->macaddr[0][2], hldev->macaddr[0][3],
5381 hldev->macaddr[0][4], hldev->macaddr[0][5]);
5382
5383 status = __hal_stats_initialize(&hldev->stats, hldev);
5384 if (status != XGE_HAL_OK) {
5385 xge_debug_device(XGE_ERR,
5386 "__hal_stats_initialize failed");
5387 xge_hal_device_terminate(hldev);
5388 return status;
5389 }
5390
5391 status = __hal_device_hw_initialize(hldev);
5392 if (status != XGE_HAL_OK) {
5393 xge_debug_device(XGE_ERR,
5394 "__hal_device_hw_initialize failed");
5395 xge_hal_device_terminate(hldev);
5396 return status;
5397 }
5398 hldev->dump_buf = (char *)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
5399 if (hldev->dump_buf == NULL) {
5400 xge_debug_device(XGE_ERR,
5401 "dump_buf allocation failed");
5402 xge_hal_device_terminate(hldev);
5403 return XGE_HAL_ERR_OUT_OF_MEMORY;
5404 }
5405
5406
5407 /* Xena-only: need to serialize fifo posts across all device fifos */
5408 #if defined(XGE_HAL_TX_MULTI_POST)
5409 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
5410 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5411 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
5412 #endif
5413 /* Getting VPD data */
5414 __hal_device_get_vpd_data(hldev);
5415
5416 hldev->is_initialized = 1;
5417
5418 return XGE_HAL_OK;
5419 }
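
/*
 * Usage sketch (illustrative only): a minimal ULD probe sequence
 * around xge_hal_device_initialize().  "pdev", "my_attr" and
 * "my_config" are assumed to be filled in by the OS-specific probe
 * code; error handling is reduced to the bare minimum.
 *
 *	xge_hal_device_t *hldev;
 *	xge_hal_status_e status;
 *
 *	hldev = (xge_hal_device_t *)xge_os_malloc(pdev,
 *	    sizeof(xge_hal_device_t));
 *	if (hldev == NULL)
 *		return;
 *
 *	status = xge_hal_device_initialize(hldev, &my_attr, &my_config);
 *	if (status != XGE_HAL_OK) {
 *		xge_os_free(pdev, hldev, sizeof(xge_hal_device_t));
 *		return;
 *	}
 */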
5420
5421 /**
5422 * xge_hal_device_terminating - Mark the device as 'terminating'.
5423 * @devh: HAL device handle.
5424 *
5425 * Mark the device as 'terminating', going to terminate. Can be used
5426 * to serialize termination with other running processes/contexts.
5427 *
5428 * See also: xge_hal_device_terminate().
5429 */
5430 void
xge_hal_device_terminating(xge_hal_device_h devh)5431 xge_hal_device_terminating(xge_hal_device_h devh)
5432 {
5433 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5434 xge_list_t *item;
5435 xge_hal_channel_t *channel;
5436 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5437 unsigned long flags = 0;
5438 #endif
5439
5440 /*
5441 * go through each opened tx channel and acquire its
5442 * lock, so it will serialize with HAL termination flag
5443 */
5444 xge_list_for_each(item, &hldev->fifo_channels) {
5445 channel = xge_container_of(item, xge_hal_channel_t, item);
5446 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5447 xge_os_spin_lock(&channel->reserve_lock);
5448 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5449 xge_os_spin_lock_irq(&channel->reserve_lock, flags);
5450 #endif
5451
5452 channel->terminating = 1;
5453
5454 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5455 xge_os_spin_unlock(&channel->reserve_lock);
5456 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5457 xge_os_spin_unlock_irq(&channel->reserve_lock, flags);
5458 #endif
5459 }
5460
5461 hldev->terminating = 1;
5462 }
5463
5464 /**
5465 * xge_hal_device_terminate - Terminate Xframe device.
5466 * @hldev: HAL device handle.
5467 *
5468 * Terminate HAL device.
5469 *
5470 * See also: xge_hal_device_initialize().
5471 */
5472 void
xge_hal_device_terminate(xge_hal_device_t * hldev)5473 xge_hal_device_terminate(xge_hal_device_t *hldev)
5474 {
5475 xge_assert(g_xge_hal_driver != NULL);
5476 xge_assert(hldev != NULL);
5477 xge_assert(hldev->magic == XGE_HAL_MAGIC);
5478
5479 xge_queue_flush(hldev->queueh);
5480
5481 hldev->terminating = 1;
5482 hldev->is_initialized = 0;
5483 hldev->in_poll = 0;
5484 hldev->magic = XGE_HAL_DEAD;
5485
5486 #if defined(XGE_HAL_TX_MULTI_POST)
5487 xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
5488 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5489 xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
5490 #endif
5491
5492 xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
5493 (unsigned long long)(ulong_t)hldev);
5494
5495 xge_assert(xge_list_is_empty(&hldev->fifo_channels));
5496 xge_assert(xge_list_is_empty(&hldev->ring_channels));
5497
5498 if (hldev->stats.is_initialized) {
5499 __hal_stats_terminate(&hldev->stats);
5500 }
5501
5502 /* close if open and free all channels */
5503 while (!xge_list_is_empty(&hldev->free_channels)) {
5504 xge_hal_channel_t *channel = (xge_hal_channel_t*)
5505 hldev->free_channels.next;
5506
5507 xge_assert(!channel->is_open);
5508 xge_list_remove(&channel->item);
5509 __hal_channel_free(channel);
5510 }
5511
5512 if (hldev->queueh) {
5513 xge_queue_destroy(hldev->queueh);
5514 }
5515
5516 if (hldev->spdm_table) {
5517 xge_os_free(hldev->pdev,
5518 hldev->spdm_table[0],
5519 (sizeof(xge_hal_spdm_entry_t) *
5520 hldev->spdm_max_entries));
5521 xge_os_free(hldev->pdev,
5522 hldev->spdm_table,
5523 (sizeof(xge_hal_spdm_entry_t *) *
5524 hldev->spdm_max_entries));
5525 xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
5526 hldev->spdm_table = NULL;
5527 }
5528
5529 if (hldev->dump_buf) {
5530 xge_os_free(hldev->pdev, hldev->dump_buf,
5531 XGE_HAL_DUMP_BUF_SIZE);
5532 hldev->dump_buf = NULL;
5533 }
5534
5535 if (hldev->device_id != 0) {
5536 int j, pcisize;
5537
5538 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
5539 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
5540 for (j = 0; j < pcisize; j++) {
5541 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
5542 *((u32*)&hldev->pci_config_space_bios + j));
5543 }
5544 }
5545 }
5546 /**
5547 * __hal_device_get_vpd_data - Get VPD data.
5548 *
5549 * @hldev: HAL device handle.
5550 *
5551 * Get the product name and serial number from the VPD capability structure.
5552 *
5553 */
5554 void
__hal_device_get_vpd_data(xge_hal_device_t * hldev)5555 __hal_device_get_vpd_data(xge_hal_device_t *hldev)
5556 {
5557 u8 * vpd_data;
5558 u8 data;
5559 int index = 0, count, fail = 0;
5560 u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
5561 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
5562 vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;
5563
5564 xge_os_strlcpy((char *) hldev->vpd_data.product_name,
5565 "10 Gigabit Ethernet Adapter",
5566 sizeof(hldev->vpd_data.product_name));
5567
5568 xge_os_strlcpy((char *) hldev->vpd_data.serial_num,
5569 "not available",
5570 sizeof(hldev->vpd_data.serial_num));
5571
5572 vpd_data = (u8 *) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16);
5573 if (vpd_data == NULL)
5574 return;
5575
5576 for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
5577 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
5578 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
5579 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
5580 for (count = 0; count < 5; count++ ) {
5581 xge_os_mdelay(2);
5582 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
5583 if (data == XGE_HAL_VPD_READ_COMPLETE)
5584 break;
5585 }
5586
5587 if (count >= 5) {
5588 xge_os_printf("ERR, Reading VPD data failed");
5589 fail = 1;
5590 break;
5591 }
5592
5593 xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
5594 (u32 *)&vpd_data[index]);
5595 }
5596
5597 if(!fail) {
5598
5599 /* read serial number of adapter */
5600 for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
5601 if ((vpd_data[count] == 'S') &&
5602 (vpd_data[count + 1] == 'N') &&
5603 (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
5604 (void) memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
5605 (void) memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
5606 vpd_data[count + 2]);
5607 break;
5608 }
5609 }
5610
5611 if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
5612 (void) memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
5613 (void) memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
5614 }
5615
5616 }
5617
5618 xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16);
5619 }
5620
5621
5622 /**
5623 * xge_hal_device_handle_tcode - Handle transfer code.
5624 * @channelh: Channel handle.
5625 * @dtrh: Descriptor handle.
5626 * @t_code: One of the enumerated (and documented in the Xframe user guide)
5627 * "transfer codes".
5628 *
5629 * Handle descriptor's transfer code. The latter comes with each completed
5630 * descriptor, see xge_hal_fifo_dtr_next_completed() and
5631 * xge_hal_ring_dtr_next_completed().
5632 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
5633 *
5634 * Returns: one of the xge_hal_status_e{} enumerated types.
5635 * XGE_HAL_OK - for success.
5636 * XGE_HAL_ERR_CRITICAL - when a critical error is encountered.
5637 */
5638 xge_hal_status_e
xge_hal_device_handle_tcode(xge_hal_channel_h channelh,xge_hal_dtr_h dtrh,u8 t_code)5639 xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
5640 xge_hal_dtr_h dtrh, u8 t_code)
5641 {
5642 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
5643 xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
5644
5645 if (t_code > 15) {
5646 xge_os_printf("invalid t_code %d", t_code);
5647 return XGE_HAL_OK;
5648 }
5649
5650 if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
5651 hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;
5652
5653 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5654 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
5655 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
5656 XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
5657 txdp->control_1, txdp->control_2, txdp->buffer_pointer,
5658 txdp->host_control);
5659 #endif
5660
5661 /* handle link "down" immediately without going through
5662 * xge_hal_device_poll() routine. */
5663 if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
5664 /* link is down */
5665 if (hldev->link_state != XGE_HAL_LINK_DOWN) {
5666 xge_hal_pci_bar0_t *bar0 =
5667 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5668 u64 val64;
5669
5670 hldev->link_state = XGE_HAL_LINK_DOWN;
5671
5672 val64 = xge_os_pio_mem_read64(hldev->pdev,
5673 hldev->regh0, &bar0->adapter_control);
5674
5675 /* turn off LED */
5676 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
5677 xge_os_pio_mem_write64(hldev->pdev,
5678 hldev->regh0, val64,
5679 &bar0->adapter_control);
5680
5681 g_xge_hal_driver->uld_callbacks.link_down(
5682 hldev->upper_layer_info);
5683 }
5684 } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
5685 t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
5686 __hal_device_handle_targetabort(hldev);
5687 return XGE_HAL_ERR_CRITICAL;
5688 }
5689 return XGE_HAL_ERR_PKT_DROP;
5690 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
5691 hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;
5692
5693 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5694 xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
5695 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
5696 ":"XGE_OS_LLXFMT, rxdp->control_1,
5697 rxdp->control_2, rxdp->buffer0_ptr,
5698 rxdp->host_control);
5699 #endif
5700 if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
5701 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
5702 __hal_device_handle_eccerr(hldev, "rxd_t_code",
5703 (u64)t_code);
5704 return XGE_HAL_ERR_CRITICAL;
5705 } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
5706 t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
5707 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
5708 __hal_device_handle_parityerr(hldev, "rxd_t_code",
5709 (u64)t_code);
5710 return XGE_HAL_ERR_CRITICAL;
5711 /* do not drop if detected unknown IPv6 extension */
5712 } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
5713 return XGE_HAL_ERR_PKT_DROP;
5714 }
5715 }
5716 return XGE_HAL_OK;
5717 }
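
/*
 * Usage sketch (illustrative only): checking the transfer code of
 * completed receive descriptors.  The completion-retrieval call is the
 * one referenced above, and the assumption that a zero t_code means
 * "no error" is a caller-side convention, not verified here.
 *
 *	xge_hal_dtr_h dtrh;
 *	u8 t_code;
 *
 *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
 *	    &t_code) == XGE_HAL_OK) {
 *		if (t_code != 0 &&
 *		    xge_hal_device_handle_tcode(channelh, dtrh,
 *		    t_code) != XGE_HAL_OK) {
 *			(drop the packet and re-post the descriptor)
 *			continue;
 *		}
 *		(hand the packet to the stack, then re-post dtrh)
 *	}
 */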
5718
5719 /**
5720 * xge_hal_device_link_state - Get link state.
5721 * @devh: HAL device handle.
5722 * @ls: Link state, see xge_hal_device_link_state_e{}.
5723 *
5724 * Get link state.
5725 * Returns: XGE_HAL_OK.
5726 * See also: xge_hal_device_link_state_e{}.
5727 */
xge_hal_device_link_state(xge_hal_device_h devh,xge_hal_device_link_state_e * ls)5728 xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
5729 xge_hal_device_link_state_e *ls)
5730 {
5731 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5732
5733 xge_assert(ls != NULL);
5734 *ls = hldev->link_state;
5735 return XGE_HAL_OK;
5736 }
5737
5738 /**
5739 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
5740 * @devh: HAL device handle.
5741 * @interval_us: Time interval, in microseconds.
5742 * Unlike transmit and receive interrupts,
5743 * the scheduled interrupt is generated independently of
5744 * traffic, but purely based on time.
5745 * @one_shot: 1 - generate scheduled interrupt only once.
5746 * 0 - generate scheduled interrupt periodically at the specified
5747 * @interval_us interval.
5748 *
5749 * (Re-)configure scheduled interrupt. Can be called at runtime to change
5750 * the setting, generate one-shot interrupts based on the resource and/or
5751 * traffic conditions, or for other purposes.
5752 * See also: xge_hal_device_config_t{}.
5753 */
xge_hal_device_sched_timer(xge_hal_device_h devh,int interval_us,int one_shot)5754 void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
5755 int one_shot)
5756 {
5757 u64 val64;
5758 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5759 xge_hal_pci_bar0_t *bar0 =
5760 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5761 unsigned int interval = hldev->config.pci_freq_mherz * interval_us;
5762
5763 interval = __hal_fix_time_ival_herc(hldev, interval);
5764
5765 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5766 &bar0->scheduled_int_ctrl);
5767 if (interval) {
5768 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
5769 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
5770 if (one_shot) {
5771 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
5772 }
5773 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5774 } else {
5775 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5776 }
5777
5778 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5779 val64, &bar0->scheduled_int_ctrl);
5780
5781 xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
5782 (unsigned long long)val64,
5783 interval ? "enabled" : "disabled");
5784 }
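
/*
 * Usage sketch (illustrative only): arm a one-shot scheduled
 * interrupt 500 microseconds from now, e.g. to kick a deferred
 * buffer-replenish path; passing interval_us == 0 disables the
 * scheduled interrupt again.  "my_devh" is a caller-side handle.
 *
 *	xge_hal_device_sched_timer(my_devh, 500, 1);
 *	...
 *	xge_hal_device_sched_timer(my_devh, 0, 0);
 */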
5785
5786 /**
5787 * xge_hal_device_check_id - Verify device ID.
5788 * @devh: HAL device handle.
5789 *
5790 * Verify device ID.
5791 * Returns: one of the xge_hal_card_e{} enumerated types.
5792 * See also: xge_hal_card_e{}.
5793 */
5794 xge_hal_card_e
xge_hal_device_check_id(xge_hal_device_h devh)5795 xge_hal_device_check_id(xge_hal_device_h devh)
5796 {
5797 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5798 switch (hldev->device_id) {
5799 case XGE_PCI_DEVICE_ID_XENA_1:
5800 case XGE_PCI_DEVICE_ID_XENA_2:
5801 return XGE_HAL_CARD_XENA;
5802 case XGE_PCI_DEVICE_ID_HERC_1:
5803 case XGE_PCI_DEVICE_ID_HERC_2:
5804 return XGE_HAL_CARD_HERC;
5805 case XGE_PCI_DEVICE_ID_TITAN_1:
5806 case XGE_PCI_DEVICE_ID_TITAN_2:
5807 return XGE_HAL_CARD_TITAN;
5808 default:
5809 return XGE_HAL_CARD_UNKNOWN;
5810 }
5811 }
5812
5813 /**
5814 * xge_hal_device_pci_info_get - Get PCI bus information such as width,
5815 * frequency, and mode from previously stored values.
5816 * @devh: HAL device handle.
5817 * @pci_mode: pointer to a variable of enumerated type
5818 * xge_hal_pci_mode_e{}.
5819 * @bus_frequency: pointer to a variable of enumerated type
5820 * xge_hal_pci_bus_frequency_e{}.
5821 * @bus_width: pointer to a variable of enumerated type
5822 * xge_hal_pci_bus_width_e{}.
5823 *
5824 * Get pci mode, frequency, and PCI bus width.
5825 * Returns: one of the xge_hal_status_e{} enumerated types.
5826 * XGE_HAL_OK - for success.
5827 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle.
5828 * See also: xge_hal_pci_mode_e{}, xge_hal_pci_bus_frequency_e{}, xge_hal_pci_bus_width_e{}.
5829 */
5830 xge_hal_status_e
xge_hal_device_pci_info_get(xge_hal_device_h devh,xge_hal_pci_mode_e * pci_mode,xge_hal_pci_bus_frequency_e * bus_frequency,xge_hal_pci_bus_width_e * bus_width)5831 xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
5832 xge_hal_pci_bus_frequency_e *bus_frequency,
5833 xge_hal_pci_bus_width_e *bus_width)
5834 {
5835 xge_hal_status_e rc_status;
5836 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5837
5838 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) {
5839 rc_status = XGE_HAL_ERR_INVALID_DEVICE;
5840 xge_debug_device(XGE_ERR,
5841 "xge_hal_device_pci_info_get error, rc %d for device %p",
5842 rc_status, hldev);
5843
5844 return rc_status;
5845 }
5846
5847 *pci_mode = hldev->pci_mode;
5848 *bus_frequency = hldev->bus_frequency;
5849 *bus_width = hldev->bus_width;
5850 rc_status = XGE_HAL_OK;
5851 return rc_status;
5852 }
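
/*
 * Usage sketch (illustrative only): query the negotiated PCI mode,
 * bus frequency and bus width once the device is initialized.
 *
 *	xge_hal_pci_mode_e mode;
 *	xge_hal_pci_bus_frequency_e freq;
 *	xge_hal_pci_bus_width_e width;
 *
 *	if (xge_hal_device_pci_info_get(my_devh, &mode, &freq,
 *	    &width) == XGE_HAL_OK) {
 *		(report mode/freq/width, e.g. via xge_os_printf())
 *	}
 */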
5853
5854 /**
5855 * xge_hal_reinitialize_hw
5856 * @hldev: private member of the device structure.
5857 *
5858 * This function will soft reset the NIC and re-initialize all the
5859 * I/O registers to the values they had after its initial initialization
5860 * through the probe function.
5861 */
xge_hal_reinitialize_hw(xge_hal_device_t * hldev)5862 int xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
5863 {
5864 (void) xge_hal_device_reset(hldev);
5865 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
5866 xge_hal_device_terminate(hldev);
5867 (void) __hal_device_reset(hldev);
5868 return 1;
5869 }
5870 return 0;
5871 }
5872
5873
5874 /*
5875 * __hal_read_spdm_entry_line
5876 * @hldev: pointer to xge_hal_device_t structure
5877 * @spdm_line: spdm line in the spdm entry to be read.
5878 * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
5879 * @spdm_line_val: Contains the value stored in the spdm line.
5880 *
5881 * The SPDM table contains up to a maximum of 256 spdm entries.
5882 * Each spdm entry contains 8 lines and each line stores 8 bytes.
5883 * This function reads the spdm line (addressed by @spdm_line)
5884 * of the spdm entry (addressed by @spdm_entry) in
5885 * the SPDM table.
5886 */
5887 xge_hal_status_e
__hal_read_spdm_entry_line(xge_hal_device_t * hldev,u8 spdm_line,u16 spdm_entry,u64 * spdm_line_val)5888 __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
5889 u16 spdm_entry, u64 *spdm_line_val)
5890 {
5891 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5892 u64 val64;
5893
5894 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
5895 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
5896 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);
5897
5898 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5899 &bar0->rts_rth_spdm_mem_ctrl);
5900
5901 /* poll until done */
5902 if (__hal_device_register_poll(hldev,
5903 &bar0->rts_rth_spdm_mem_ctrl, 0,
5904 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
5905 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
5906
5907 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5908 }
5909
5910 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
5911 hldev->regh0, &bar0->rts_rth_spdm_mem_data);
5912 return XGE_HAL_OK;
5913 }
5914
5915
5916 /*
5917 * __hal_get_free_spdm_entry
5918 * @hldev: pointer to xge_hal_device_t structure
5919 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table.
5920 *
5921 * This function returns an index of unused spdm entry in the SPDM
5922 * table.
5923 */
5924 static xge_hal_status_e
__hal_get_free_spdm_entry(xge_hal_device_t * hldev,u16 * spdm_entry)5925 __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry)
5926 {
5927 xge_hal_status_e status;
5928 u64 spdm_line_val=0;
5929
5930 /*
5931 * Search in the local SPDM table for a free slot.
5932 */
5933 *spdm_entry = 0;
5934 for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
5935 if (!hldev->spdm_table[*spdm_entry]->in_use) {
5936 break;
5937 }
5938 }
5939
5940 if (*spdm_entry >= hldev->spdm_max_entries) {
5941 return XGE_HAL_ERR_SPDM_TABLE_FULL;
5942 }
5943
5944 /*
5945 * Make sure that the corresponding spdm entry in the SPDM
5946 * table is free.
5947 * Seventh line of the spdm entry contains information about
5948 * whether the entry is free or not.
5949 */
5950 if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
5951 &spdm_line_val)) != XGE_HAL_OK) {
5952 return status;
5953 }
5954
5955 /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */
5956 if ((spdm_line_val & BIT(63))) {
5957 /*
5958 * Log a warning
5959 */
5960 xge_debug_device(XGE_ERR, "Local SPDM table is not "
5961 "consistent with the actual one for the spdm "
5962 "entry %d", *spdm_entry);
5963 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
5964 }
5965
5966 return XGE_HAL_OK;
5967 }
5968
5969
5970 /*
5971 * __hal_calc_jhash - Calculate Jenkins hash.
5972 * @msg: Jenkins hash algorithm key.
5973 * @length: Length of the key.
5974 * @golden_ratio: Jenkins hash golden ratio.
5975 * @init_value: Jenkins hash initial value.
5976 *
5977 * This function implements the Jenkins based algorithm used for the
5978 * calculation of the RTH hash.
5979 * Returns: Jenkins hash value.
5980 *
5981 */
5982 static u32
__hal_calc_jhash(u8 * msg,u32 length,u32 golden_ratio,u32 init_value)5983 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
5984 {
5985
5986 register u32 a,b,c,len;
5987
5988 /*
5989 * Set up the internal state
5990 */
5991 len = length;
5992 a = b = golden_ratio; /* the golden ratio; an arbitrary value */
5993 c = init_value; /* the previous hash value */
5994
5995 /* handle most of the key */
5996 while (len >= 12)
5997 {
5998 a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
5999 + ((u32)msg[3]<<24));
6000 b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
6001 + ((u32)msg[7]<<24));
6002 c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
6003 + ((u32)msg[11]<<24));
6004 mix(a,b,c);
6005 msg += 12; len -= 12;
6006 }
6007
6008 /* handle the last 11 bytes */
6009 c += length;
6010 switch(len) /* all the case statements fall through */
6011 {
6012 case 11: c+= ((u32)msg[10]<<24);
6013 break;
6014 case 10: c+= ((u32)msg[9]<<16);
6015 break;
6016 case 9 : c+= ((u32)msg[8]<<8);
6017 break;
6018 /* the first byte of c is reserved for the length */
6019 case 8 : b+= ((u32)msg[7]<<24);
6020 break;
6021 case 7 : b+= ((u32)msg[6]<<16);
6022 break;
6023 case 6 : b+= ((u32)msg[5]<<8);
6024 break;
6025 case 5 : b+= msg[4];
6026 break;
6027 case 4 : a+= ((u32)msg[3]<<24);
6028 break;
6029 case 3 : a+= ((u32)msg[2]<<16);
6030 break;
6031 case 2 : a+= ((u32)msg[1]<<8);
6032 break;
6033 case 1 : a+= msg[0];
6034 break;
6035 /* case 0: nothing left to add */
6036 }
6037
6038 mix(a,b,c);
6039
6040 /* report the result */
6041 return c;
6042 }
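
/*
 * Illustrative note (derived from the key construction in
 * xge_hal_spdm_entry_add() below, not from separate documentation):
 * the @length passed to __hal_calc_jhash() is 12 bytes for an IPv4
 * flow with L4 ports included (4 + 4 + 2 + 2), 8 bytes without them,
 * and 36 or 32 bytes respectively for IPv6.  A sketch of the call for
 * the IPv4-with-L4 case:
 *
 *	jhash_value = __hal_calc_jhash(msg, 12, jhash_golden_ratio,
 *	    jhash_init_val);
 */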
6043
6044
6045 /**
6046 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
6047 * @devh: HAL device handle.
6048 * @src_ip: Source ip address(IPv4/IPv6).
6049 * @dst_ip: Destination ip address(IPv4/IPv6).
6050 * @l4_sp: L4 source port.
6051 * @l4_dp: L4 destination port.
6052 * @is_tcp: Set to 1, if the protocol is TCP.
6053 * 0, if the protocol is UDP.
6054 * @is_ipv4: Set to 1, if the protocol is IPv4.
6055 * 0, if the protocol is IPv6.
6056 * @tgt_queue: Target queue to route the receive packet.
6057 *
6058 * This function adds a new entry to the SPDM table.
6059 *
6060 * Returns: XGE_HAL_OK - success.
6061 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
6062 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry within
6063 * the time (timeout).
6064 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full.
6065 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
6066 *
6067 * See also: xge_hal_spdm_entry_remove{}.
6068 */
6069 xge_hal_status_e
xge_hal_spdm_entry_add(xge_hal_device_h devh,xge_hal_ipaddr_t * src_ip,xge_hal_ipaddr_t * dst_ip,u16 l4_sp,u16 l4_dp,u8 is_tcp,u8 is_ipv4,u8 tgt_queue)6070 xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
6071 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
6072 u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
6073 {
6074
6075 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6076 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6077 u32 jhash_value;
6078 u32 jhash_init_val;
6079 u32 jhash_golden_ratio;
6080 u64 val64;
6081 int off;
6082 u16 spdm_entry;
6083 u8 msg[XGE_HAL_JHASH_MSG_LEN];
6084 int ipaddr_len;
6085 xge_hal_status_e status;
6086
6087
6088 if (!hldev->config.rth_spdm_en) {
6089 return XGE_HAL_ERR_SPDM_NOT_ENABLED;
6090 }
6091
6092 if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
6093 (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
6094 return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
6095 }
6096
6097
6098 /*
6099 * Calculate the jenkins hash.
6100 */
6101 /*
6102 * Create the Jenkins hash algorithm key.
6103 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
6104 * use L4 information. Otherwise key = {L3SA, L3DA}.
6105 */
6106
6107 if (is_ipv4) {
6108 ipaddr_len = 4; /* in bytes */
6109 } else {
6110 ipaddr_len = 16;
6111 }
6112
6113 /*
6114 * The Jenkins hash algorithm expects the key in big endian
6115 * format. Since the key is a byte array, memcpy won't work in the
6116 * little endian case. So, the code below extracts each
6117 * byte starting from the MSB and stores it in the key.
6118 */
6119 if (is_ipv4) {
6120 for (off = 0; off < ipaddr_len; off++) {
6121 u32 mask = vBIT32(0xff,(off*8),8);
6122 int shift = 32-(off+1)*8;
6123 msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
6124 msg[off+ipaddr_len] =
6125 (u8)((dst_ip->ipv4.addr & mask) >> shift);
6126 }
6127 } else {
6128 for (off = 0; off < ipaddr_len; off++) {
6129 int loc = off % 8;
6130 u64 mask = vBIT(0xff,(loc*8),8);
6131 int shift = 64-(loc+1)*8;
6132
6133 msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
6134 >> shift);
6135 msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
6136 & mask) >> shift);
6137 }
6138 }
6139
6140 off = (2*ipaddr_len);
6141
6142 if (hldev->config.rth_spdm_use_l4) {
6143 msg[off] = (u8)((l4_sp & 0xff00) >> 8);
6144 msg[off + 1] = (u8)(l4_sp & 0xff);
6145 msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
6146 msg[off + 3] = (u8)(l4_dp & 0xff);
6147 off += 4;
6148 }
6149
6150 /*
6151 * Calculate jenkins hash for this configuration
6152 */
6153 val64 = xge_os_pio_mem_read64(hldev->pdev,
6154 hldev->regh0,
6155 &bar0->rts_rth_jhash_cfg);
6156 jhash_golden_ratio = (u32)(val64 >> 32);
6157 jhash_init_val = (u32)(val64 & 0xffffffff);
6158
6159 jhash_value = __hal_calc_jhash(msg, off,
6160 jhash_golden_ratio,
6161 jhash_init_val);
6162
6163 xge_os_spin_lock(&hldev->spdm_lock);
6164
6165 /*
6166 * Locate a free slot in the SPDM table. To avoid a search in the
6167 * actual SPDM table, which is very expensive in terms of time,
6168 * we are maintaining a local copy of the table and the search for
6169 * the free entry is performed in the local table.
6170 */
6171 if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
6172 != XGE_HAL_OK) {
6173 xge_os_spin_unlock(&hldev->spdm_lock);
6174 return status;
6175 }
6176
6177 /*
6178 * Add this entry to the SPDM table
6179 */
6180 status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
6181 is_tcp, is_ipv4, tgt_queue,
6182 jhash_value, /* calculated jhash */
6183 spdm_entry);
6184
6185 xge_os_spin_unlock(&hldev->spdm_lock);
6186
6187 return status;
6188 }
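
/*
 * Usage sketch (illustrative only; the addresses, ports and target
 * ring below are made-up example values): steer a TCP/IPv4 flow to
 * receive ring 2.
 *
 *	xge_hal_ipaddr_t src, dst;
 *	xge_hal_status_e status;
 *
 *	src.ipv4.addr = 0xc0a80064;	(192.168.0.100)
 *	dst.ipv4.addr = 0xc0a80001;	(192.168.0.1)
 *
 *	status = xge_hal_spdm_entry_add(my_devh, &src, &dst,
 *	    1024, 80, 1, 1, 2);
 *	if (status == XGE_HAL_ERR_SPDM_TABLE_FULL) {
 *		(remove a stale entry with xge_hal_spdm_entry_remove()
 *		 and retry)
 *	}
 */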
6189
6190 /**
6191 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
6192 * @devh: HAL device handle.
6193 * @src_ip: Source ip address(IPv4/IPv6).
6194 * @dst_ip: Destination ip address(IPv4/IPv6).
6195 * @l4_sp: L4 source port.
6196 * @l4_dp: L4 destination port.
6197 * @is_tcp: Set to 1, if the protocol is TCP.
6198 * 0, if the protocol is UDP.
6199 * @is_ipv4: Set to 1, if the protocol is IPv4.
6200 * 0, if the protocol is IPv6.
6201 *
6202 * This function removes an entry from the SPDM table.
6203 *
6204 * Returns: XGE_HAL_OK - success.
6205 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
6206 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry within
6207 * the time (timeout).
6208 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
6209 * table.
6210 *
6211 * See also: xge_hal_spdm_entry_add{}.
6212 */
6213 xge_hal_status_e
xge_hal_spdm_entry_remove(xge_hal_device_h devh,xge_hal_ipaddr_t * src_ip,xge_hal_ipaddr_t * dst_ip,u16 l4_sp,u16 l4_dp,u8 is_tcp,u8 is_ipv4)6214 xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
6215 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
6216 u8 is_tcp, u8 is_ipv4)
6217 {
6218
6219 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6220 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6221 u64 val64;
6222 u16 spdm_entry;
6223 xge_hal_status_e status;
6224 u64 spdm_line_arr[8];
6225 u8 line_no;
6226 u8 spdm_is_tcp;
6227 u8 spdm_is_ipv4;
6228 u16 spdm_l4_sp;
6229 u16 spdm_l4_dp;
6230
6231 if (!hldev->config.rth_spdm_en) {
6232 return XGE_HAL_ERR_SPDM_NOT_ENABLED;
6233 }
6234
6235 xge_os_spin_lock(&hldev->spdm_lock);
6236
6237 /*
6238 * Poll the rxpic_int_reg register until spdm ready bit is set or
6239 * timeout happens.
6240 */
6241 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
6242 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
6243 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6244
6245 /* upper layer may require to repeat */
6246 xge_os_spin_unlock(&hldev->spdm_lock);
6247 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
6248 }
6249
6250 /*
6251 * Clear the SPDM READY bit.
6252 */
6253 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6254 &bar0->rxpic_int_reg);
6255 val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
6256 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6257 &bar0->rxpic_int_reg);
6258
6259 /*
6260 * Search in the local SPDM table to get the index of the
6261 * corresponding entry in the SPDM table.
6262 */
6263 spdm_entry = 0;
6264 for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
6265 if ((!hldev->spdm_table[spdm_entry]->in_use) ||
6266 (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
6267 (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
6268 (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
6269 (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
6270 continue;
6271 }
6272
6273 /*
6274 * Compare the given src/dst IP addresses with those stored in the entry
6275 */
6276 if (is_ipv4) {
6277 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
6278 != src_ip->ipv4.addr) ||
6279 (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
6280 != dst_ip->ipv4.addr)) {
6281 continue;
6282 }
6283 } else {
6284 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
6285 != src_ip->ipv6.addr[0]) ||
6286 (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
6287 != src_ip->ipv6.addr[1]) ||
6288 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
6289 != dst_ip->ipv6.addr[0]) ||
6290 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
6291 != dst_ip->ipv6.addr[1])) {
6292 continue;
6293 }
6294 }
6295 break;
6296 }
6297
6298 if (spdm_entry >= hldev->spdm_max_entries) {
6299 xge_os_spin_unlock(&hldev->spdm_lock);
6300 return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
6301 }
6302
6303 /*
6304 * Retrieve the corresponding entry from the SPDM table and
6305 * make sure that the data is consistent.
6306 */
6307 for(line_no = 0; line_no < 8; line_no++) {
6308
6309 /*
6310 * SPDM lines 2, 3 and 4 are valid only for an IPv6 entry.
6311 * SPDM lines 5 & 6 are reserved. We don't have to
6312 * read those lines in these cases.
6313 */
6314 if (((is_ipv4) &&
6315 ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
6316 (line_no == 5) ||
6317 (line_no == 6)) {
6318 continue;
6319 }
6320
6321 if ((status = __hal_read_spdm_entry_line(
6322 hldev,
6323 line_no,
6324 spdm_entry,
6325 &spdm_line_arr[line_no]))
6326 != XGE_HAL_OK) {
6327 xge_os_spin_unlock(&hldev->spdm_lock);
6328 return status;
6329 }
6330 }
6331
6332 /*
6333 * Seventh line of the spdm entry contains the entry_enable
6334 * bit. Make sure that the entry_enable bit of this spdm entry
6335 * is set.
6336 * To remove an entry from the SPDM table, reset this
6337 * bit.
6338 */
6339 if (!(spdm_line_arr[7] & BIT(63))) {
6340 /*
6341 * Log a warning
6342 */
6343 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6344 "consistent with the actual one for the spdm "
6345 "entry %d ", spdm_entry);
6346 goto err_exit;
6347 }
6348
6349 /*
6350 * Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
6351 * table and do a comparison.
6352 */
6353 spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
6354 spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
6355 spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
6356 spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);
6357
6358
6359 if ((spdm_is_tcp != is_tcp) ||
6360 (spdm_is_ipv4 != is_ipv4) ||
6361 (spdm_l4_sp != l4_sp) ||
6362 (spdm_l4_dp != l4_dp)) {
6363 /*
6364 * Log a warning
6365 */
6366 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6367 "consistent with the actual one for the spdm "
6368 "entry %d ", spdm_entry);
6369 goto err_exit;
6370 }
6371
6372 if (is_ipv4) {
6373 /* Upper 32 bits of spdm_line(64 bit) contains the
6374 * src IPv4 address. Lower 32 bits of spdm_line
6375 * contains the destination IPv4 address.
6376 */
6377 u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
6378 u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);
6379
6380 if ((temp_src_ip != src_ip->ipv4.addr) ||
6381 (temp_dst_ip != dst_ip->ipv4.addr)) {
6382 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6383 "consistent with the actual one for the spdm "
6384 "entry %d ", spdm_entry);
6385 goto err_exit;
6386 }
6387
6388 } else {
6389 /*
6390 * SPDM line 1 & 2 contains the src IPv6 address.
6391 * SPDM line 3 & 4 contains the dst IPv6 address.
6392 */
6393 if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
6394 (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
6395 (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
6396 (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {
6397
6398 /*
6399 * Log a warning
6400 */
6401 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6402 "consistent with the actual one for the spdm "
6403 "entry %d ", spdm_entry);
6404 goto err_exit;
6405 }
6406 }
6407
6408 /*
6409 * Reset the entry_enable bit to zero
6410 */
6411 spdm_line_arr[7] &= ~BIT(63);
6412
6413 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6414 spdm_line_arr[7],
6415 (void *)((char *)hldev->spdm_mem_base +
6416 (spdm_entry * 64) + (7 * 8)));
6417
6418 /*
6419 * Wait for the operation to be completed.
6420 */
6421 if (__hal_device_register_poll(hldev,
6422 &bar0->rxpic_int_reg, 1,
6423 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
6424 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6425 xge_os_spin_unlock(&hldev->spdm_lock);
6426 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
6427 }
6428
6429 /*
6430 * Make the corresponding spdm entry in the local SPDM table
6431 * available for future use.
6432 */
6433 hldev->spdm_table[spdm_entry]->in_use = 0;
6434 xge_os_spin_unlock(&hldev->spdm_lock);
6435
6436 return XGE_HAL_OK;
6437
6438 err_exit:
6439 xge_os_spin_unlock(&hldev->spdm_lock);
6440 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
6441 }
6442
6443 /*
6444 * __hal_device_rti_set
6445 * @ring_qid: The post_qid of the ring.
6446 * @channel: HAL channel of the ring.
6447 *
6448 * This function stores the RTI value associated with the MSI and
6449 * also unmasks this particular RTI in the rx_traffic_mask register.
6450 */
__hal_device_rti_set(int ring_qid,xge_hal_channel_t * channel)6451 static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
6452 {
6453 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6454 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6455 u64 val64;
6456
6457 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
6458 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
6459 channel->rti = (u8)ring_qid;
6460
6461 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6462 &bar0->rx_traffic_mask);
6463 val64 &= ~BIT(ring_qid);
6464 xge_os_pio_mem_write64(hldev->pdev,
6465 hldev->regh0, val64,
6466 &bar0->rx_traffic_mask);
6467 }
6468
6469 /*
6470 * __hal_device_tti_set
6471 * @fifo_qid: The post_qid of the FIFO.
6472 * @channel: HAL channel of the FIFO.
6473 *
6474 * This function stores the TTI value associated with the MSI and
6475 * also unmasks this particular TTI in the tx_traffic_mask register.
6476 */
__hal_device_tti_set(int fifo_qid,xge_hal_channel_t * channel)6477 static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
6478 {
6479 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6480 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6481 u64 val64;
6482
6483 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
6484 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
6485 channel->tti = (u8)fifo_qid;
6486
6487 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6488 &bar0->tx_traffic_mask);
6489 val64 &= ~BIT(fifo_qid);
6490 xge_os_pio_mem_write64(hldev->pdev,
6491 hldev->regh0, val64,
6492 &bar0->tx_traffic_mask);
6493 }
6494
6495 /**
6496 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
6497 * FIFO for a given MSI.
6498 * @channelh: HAL channel handle.
6499 * @msi: MSI Number associated with the channel.
6500 * @msi_msg: The MSI message associated with the MSI number above.
6501 *
6502 * This API will associate a given channel (either Ring or FIFO) with the
6503 * given MSI number. It will also program the Tx_Mat/Rx_Mat tables in the
6504 * hardware to indicate this association.
6505 */
6506 xge_hal_status_e
xge_hal_channel_msi_set(xge_hal_channel_h channelh,int msi,u32 msi_msg)6507 xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
6508 {
6509 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6510 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6511 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6512 u64 val64;
6513
6514 channel->msi_msg = msi_msg;
6515 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
6516 int ring = channel->post_qid;
6517 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
6518 " MSI: %d", channel->msi_msg, ring, msi);
6519 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6520 &bar0->rx_mat);
6521 val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
6522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6523 &bar0->rx_mat);
6524 __hal_device_rti_set(ring, channel);
6525 } else {
6526 int fifo = channel->post_qid;
6527 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
6528 " MSI: %d", channel->msi_msg, fifo, msi);
6529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6530 &bar0->tx_mat[0]);
6531 val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
6532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6533 &bar0->tx_mat[0]);
6534 __hal_device_tti_set(fifo, channel);
6535 }
6536
6537 return XGE_HAL_OK;
6538 }
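
/*
 * Usage sketch (illustrative only): associate an opened channel with
 * MSI vector 1.  How the 32-bit MSI message ("my_msi_msg") is
 * obtained from the OS is platform-specific and assumed here.
 *
 *	(void) xge_hal_channel_msi_set(channelh, 1, my_msi_msg);
 */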
6539
6540 /**
6541 * xge_hal_mask_msix - Mask an MSI-X vector.
6542 * @hldev: HAL device handle.
6543 * @msi_id: MSI ID
6544 *
6545 * The function masks the msix interrupt for the given msi_id
6546 *
6547 * Note:
6548 *
6549 * Returns: XGE_HAL_OK (0) on success, or
6550 * XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range.
6551 *
6552 * See also:
6553 */
6554 xge_hal_status_e
xge_hal_mask_msix(xge_hal_device_h devh,int msi_id)6555 xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
6556 {
6557 xge_hal_status_e status = XGE_HAL_OK;
6558 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6559 u32 *bar2 = (u32 *)hldev->bar2;
6560 u32 val32;
6561
6562 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
6563
6564 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
6565 val32 |= 1;
6566 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
6567 return status;
6568 }
6569
6570 /**
6571 * xge_hal_unmask_msix - Unmask an MSI-X vector.
6572 * @hldev: HAL device handle.
6573 * @msi_id: MSI ID
6574 *
6575 * The function unmasks the msix interrupt for the given msi_id
6576 *
6577 * Note:
6578 *
6579 * Returns: XGE_HAL_OK (0) on success, or
6580 * XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range.
6581 *
6582 * See also:
6583 */
6584 xge_hal_status_e
xge_hal_unmask_msix(xge_hal_device_h devh,int msi_id)6585 xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
6586 {
6587 xge_hal_status_e status = XGE_HAL_OK;
6588 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6589 u32 *bar2 = (u32 *)hldev->bar2;
6590 u32 val32;
6591
6592 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
6593
6594 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
6595 val32 &= ~1;
6596 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
6597 return status;
6598 }
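
/*
 * Usage sketch (illustrative only): an MSI-X interrupt handler
 * typically masks its own vector for the duration of processing and
 * unmasks it when done.  "msi_id" is the vector index the handler
 * was registered with.
 *
 *	(void) xge_hal_mask_msix(devh, msi_id);
 *	(process the ring or fifo associated with this vector)
 *	(void) xge_hal_unmask_msix(devh, msi_id);
 */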
6599
6600 /*
6601 * __hal_set_msix_vals
6602 * @devh: HAL device handle.
6603 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
6604 * Filled in by this function.
6605 * @msix_address: 32bit MSI-X DMA address.
6606 * Filled in by this function.
6607 * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
6608 * entry in the table of MSI-X (value, address) pairs.
6609 *
6610 * This function will program the hardware associating the given
6611 * address/value combination with the specified msi number.
6612 */
__hal_set_msix_vals(xge_hal_device_h devh,u32 * msix_value,u64 * msix_addr,int msix_idx)6613 static void __hal_set_msix_vals (xge_hal_device_h devh,
6614 u32 *msix_value,
6615 u64 *msix_addr,
6616 int msix_idx)
6617 {
6618 int cnt = 0;
6619
6620 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6621 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6622 u64 val64;
6623
6624 val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
6625 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
6626 (u32)(val64 >> 32), &bar0->xmsi_access);
6627 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
6628 (u32)(val64), &bar0->xmsi_access);
6629 do {
6630 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6631 &bar0->xmsi_access);
6632 if (val64 & XGE_HAL_XMSI_STROBE)
6633 break;
6634 cnt++;
6635 xge_os_mdelay(20);
6636 } while(cnt < 5);
6637 *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6638 &bar0->xmsi_data));
6639 *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6640 &bar0->xmsi_address);
6641 }
6642
6643 /**
6644 * xge_hal_channel_msix_set - Associate MSI-X with a channel.
6645 * @channelh: HAL channel handle.
6646 * @msix_idx: index that corresponds to a particular (@msix_value,
6647 * @msix_address) entry in the MSI-X table.
6648 *
6649 * This API associates a given channel (either Ring or FIFO) with the
6650 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
6651 * to indicate this association.
6652 */
6653 xge_hal_status_e
xge_hal_channel_msix_set(xge_hal_channel_h channelh,int msix_idx)6654 xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
6655 {
6656 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6657 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6658 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6659 u64 val64;
6660
6661 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
6662 /* Currently Ring and RTI are one to one. */
6663 int ring = channel->post_qid;
6664 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6665 &bar0->rx_mat);
6666 val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
6667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6668 &bar0->rx_mat);
6669 __hal_device_rti_set(ring, channel);
6670 hldev->config.ring.queue[channel->post_qid].intr_vector =
6671 msix_idx;
6672 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
6673 int fifo = channel->post_qid;
6674 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6675 &bar0->tx_mat[0]);
6676 val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
6677 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6678 &bar0->tx_mat[0]);
6679 __hal_device_tti_set(fifo, channel);
6680 hldev->config.fifo.queue[channel->post_qid].intr_vector =
6681 msix_idx;
6682 }
6683 channel->msix_idx = msix_idx;
6684 __hal_set_msix_vals(hldev, &channel->msix_data,
6685 &channel->msix_address,
6686 channel->msix_idx);
6687
6688 return XGE_HAL_OK;
6689 }
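
/*
 * Usage sketch (illustrative only): bind each opened channel to its
 * own MSI-X table entry, e.g. rings to entries 0..n-1 and fifos to
 * the entries that follow.  "channelh" and "idx" are caller-side
 * values chosen by the ULD.
 *
 *	if (xge_hal_channel_msix_set(channelh, idx) != XGE_HAL_OK) {
 *		(fall back to a shared interrupt)
 *	}
 */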
6690
6691 #if defined(XGE_HAL_CONFIG_LRO)
6692 /**
6693 * xge_hal_lro_terminate - Terminate lro resources.
6694 * @lro_scale: Amount of lro memory.
6695 * @hldev: Hal device structure.
6696 *
6697 */
6698 void
xge_hal_lro_terminate(u32 lro_scale,xge_hal_device_t * hldev)6699 xge_hal_lro_terminate(u32 lro_scale,
6700 xge_hal_device_t *hldev)
6701 {
6702 }
6703
6704 /**
6705 * xge_hal_lro_init - Initialize LRO resources.
6706 * @lro_scale: Amount of lro memory.
6707 * @hldev: Hal device structure.
6708 * Note: For the time being only one LRO session is used per device. The size
6709 * will be increased later.
6710 */
6711
6712 xge_hal_status_e
xge_hal_lro_init(u32 lro_scale,xge_hal_device_t * hldev)6713 xge_hal_lro_init(u32 lro_scale,
6714 xge_hal_device_t *hldev)
6715 {
6716 int i;
6717
6718 if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
6719 hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;
6720
6721 if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
6722 hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;
6723
6724 for (i=0; i < XGE_HAL_MAX_RING_NUM; i++)
6725 {
6726 xge_os_memzero(hldev->lro_desc[i].lro_pool,
6727 sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);
6728
6729 hldev->lro_desc[i].lro_next_idx = 0;
6730 hldev->lro_desc[i].lro_recent = NULL;
6731 }
6732
6733 return XGE_HAL_OK;
6734 }
6735 #endif
6736
6737
6738 /**
6739 * xge_hal_device_poll - HAL device "polling" entry point.
6740 * @devh: HAL device.
6741 *
6742 * HAL "polling" entry point. Note that this is part of HAL public API.
6743 * Upper-Layer driver _must_ periodically poll HAL via
6744 * xge_hal_device_poll().
6745 *
6746 * HAL uses caller's execution context to serially process accumulated
6747 * slow-path events, such as link state changes and hardware error
6748 * indications.
6749 *
6750 * The rate of polling could be somewhere between 500us to 10ms,
6751 * depending on requirements (e.g., the requirement to support fail-over
6752 * could mean that a 500us or even 100us polling interval needs to be used).
6753 *
6754 * The need and motivation for external polling include
6755 *
6756 * - remove the error-checking "burden" from the HAL interrupt handler
6757 * (see xge_hal_device_handle_irq());
6758 *
6759 * - remove the potential source of portability issues by _not_
6760 * implementing separate polling thread within HAL itself.
6761 *
6762 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
6763 * Usage: See ex_slow_path{}.
6764 */
6765 void
xge_hal_device_poll(xge_hal_device_h devh)6766 xge_hal_device_poll(xge_hal_device_h devh)
6767 {
6768 unsigned char item_buf[sizeof(xge_queue_item_t) +
6769 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
6770 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
6771 xge_queue_status_e qstatus;
6772 xge_hal_status_e hstatus;
6773 int i = 0;
6774 int queue_has_critical_event = 0;
6775 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6776
6777 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
6778 XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
6779
6780 _again:
6781 if (!hldev->is_initialized ||
6782 hldev->terminating ||
6783 hldev->magic != XGE_HAL_MAGIC)
6784 return;
6785
6786 if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
6787 {
6788 /*
6789 * Wait for an Hour
6790 */
6791 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
6792 } else {
6793 /*
6794 * Log error messages on excess temperature,
6795 * bias current or laser output for three cycles
6796 */
6797 __hal_updt_stats_xpak(hldev);
6798 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
6799 }
6800
6801 if (!queue_has_critical_event)
6802 queue_has_critical_event =
6803 __queue_get_reset_critical(hldev->queueh);
6804
6805 hldev->in_poll = 1;
6806 while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
6807
6808 qstatus = xge_queue_consume(hldev->queueh,
6809 XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
6810 item);
6811 if (qstatus == XGE_QUEUE_IS_EMPTY)
6812 break;
6813
6814 xge_debug_queue(XGE_TRACE,
6815 "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
6816 XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
6817 (u64)(ulong_t)item->context);
6818
6819 if (!hldev->is_initialized ||
6820 hldev->magic != XGE_HAL_MAGIC) {
6821 hldev->in_poll = 0;
6822 return;
6823 }
6824
6825 switch (item->event_type) {
6826 case XGE_HAL_EVENT_LINK_IS_UP: {
6827 if (!queue_has_critical_event &&
6828 g_xge_hal_driver->uld_callbacks.link_up) {
6829 g_xge_hal_driver->uld_callbacks.link_up(
6830 hldev->upper_layer_info);
6831 hldev->link_state = XGE_HAL_LINK_UP;
6832 }
6833 } break;
6834 case XGE_HAL_EVENT_LINK_IS_DOWN: {
6835 if (!queue_has_critical_event &&
6836 g_xge_hal_driver->uld_callbacks.link_down) {
6837 g_xge_hal_driver->uld_callbacks.link_down(
6838 hldev->upper_layer_info);
6839 hldev->link_state = XGE_HAL_LINK_DOWN;
6840 }
6841 } break;
6842 case XGE_HAL_EVENT_SERR:
6843 case XGE_HAL_EVENT_ECCERR:
6844 case XGE_HAL_EVENT_PARITYERR:
6845 case XGE_HAL_EVENT_TARGETABORT:
6846 case XGE_HAL_EVENT_SLOT_FREEZE: {
6847 void *item_data = xge_queue_item_data(item);
6848 xge_hal_event_e event_type = item->event_type;
6849 u64 val64 = *((u64*)item_data);
6850
6851 if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
6852 if (xge_hal_device_is_slot_freeze(hldev))
6853 event_type = XGE_HAL_EVENT_SLOT_FREEZE;
6854 if (g_xge_hal_driver->uld_callbacks.crit_err) {
6855 g_xge_hal_driver->uld_callbacks.crit_err(
6856 hldev->upper_layer_info,
6857 event_type,
6858 val64);
6859 /* handle one critical event per poll cycle */
6860 hldev->in_poll = 0;
6861 return;
6862 }
6863 } break;
6864 default: {
6865 xge_debug_queue(XGE_TRACE,
6866 "got non-HAL event %d",
6867 item->event_type);
6868 } break;
6869 }
6870
6871 /* broadcast this event */
6872 if (g_xge_hal_driver->uld_callbacks.event)
6873 g_xge_hal_driver->uld_callbacks.event(item);
6874 }
6875
6876 if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
6877 if (g_xge_hal_driver->uld_callbacks.before_device_poll(
6878 hldev) != 0) {
6879 hldev->in_poll = 0;
6880 return;
6881 }
6882 }
6883
6884 hstatus = __hal_device_poll(hldev);
6885 if (g_xge_hal_driver->uld_callbacks.after_device_poll)
6886 g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);
6887
6888 /*
6889 * handle critical error right away:
6890 * - walk the device queue again
6891 * - drop non-critical events, if any
6892 * - look for the 1st critical
6893 */
6894 if (hstatus == XGE_HAL_ERR_CRITICAL) {
6895 queue_has_critical_event = 1;
6896 goto _again;
6897 }
6898
6899 hldev->in_poll = 0;
6900 }
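
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define so it is never built): the ULD arms a
 * periodic OS timer and calls xge_hal_device_poll() from the callback,
 * as required by the API comment above.  The xgell_poll_tick name, the
 * timer facility and the suggested period are assumptions for this example.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static void
xgell_poll_tick(void *arg)
{
	xge_hal_device_h devh = (xge_hal_device_h)arg;

	/* Serially process queued slow-path events (link changes, errors). */
	xge_hal_device_poll(devh);

	/*
	 * Re-arm the OS timer here; anything between roughly 500us and
	 * 10ms is acceptable per the polling-rate note above.
	 */
}
#endif /* XGE_HAL_USAGE_SKETCH */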
6901
6902 /**
6903 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
6904 * @hldev: HAL device handle.
6905 *
6906 * This function is used to set the adapter to enhanced mode.
6907 *
6908 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
6909 */
6910 void
6911 xge_hal_rts_rth_init(xge_hal_device_t *hldev)
6912 {
6913 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6914 u64 val64;
6915
6916 /*
6917  * Set the receive traffic steering mode from the default (classic)
6918 * to enhanced.
6919 */
6920 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6921 &bar0->rts_ctrl);
6922 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6923 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6924 val64, &bar0->rts_ctrl);
6925 }
6926
6927 /**
6928 * xge_hal_rts_rth_clr - Clear RTS hashing.
6929 * @hldev: HAL device handle.
6930 *
6931  * This function is used to clear the RTS hashing configuration.
6932  * It brings the adapter out of enhanced mode back to classic mode.
6933  * It also clears the RTS_RTH_CFG register, i.e., the hash type, function, etc.
6934 *
6935 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
6936 */
6937 void
6938 xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
6939 {
6940 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6941 u64 val64;
6942
6943 /*
6944  * Restore the receive traffic steering mode from enhanced
6945  * back to the default (classic) mode.
6946  */
6947 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6948 &bar0->rts_ctrl);
6949 val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6950 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6951 val64, &bar0->rts_ctrl);
6952 val64 = 0;
6953 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6954 &bar0->rts_rth_cfg);
6955 }
6956
6957 /**
6958 * xge_hal_rts_rth_set - Set/configure RTS hashing.
6959 * @hldev: HAL device handle.
6960 * @def_q: default queue
6961  * @hash_type: hash type, i.e., TcpIpV4, TcpIpV6, etc.
6962  * @bucket_size: number of least significant bits of the hash used for bucket indexing.
6963  *
6964  * Used to set/configure RTS hashing:
6965 * - set the steering mode to enhanced.
6966 * - set hash function i.e algo selection.
6967 * - set the default queue.
6968 *
6969 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
6970 */
6971 void
6972 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
6973 u16 bucket_size)
6974 {
6975 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6976 u64 val64;
6977
6978 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
6979 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6980 &bar0->rts_default_q);
6981
6982 val64 = hash_type;
6983 val64 |= XGE_HAL_RTS_RTH_EN;
6984 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
6985 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
6986 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6987 &bar0->rts_rth_cfg);
6988 }
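
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define): a typical RTH bring-up switches the
 * adapter to enhanced mode with xge_hal_rts_rth_init() and then
 * configures the default queue, hash type and bucket size with
 * xge_hal_rts_rth_set().  The hash-type bits are supplied by the caller
 * from the HAL headers; the bucket size of 6 (64 buckets) and the
 * example_ name are assumptions made for this sketch.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static void
example_rth_configure(xge_hal_device_t *hldev, u64 hash_type_bits)
{
	/* Switch receive traffic steering from classic to enhanced mode. */
	xge_hal_rts_rth_init(hldev);

	/* Default queue 0, caller-supplied hash type, 2^6 = 64 buckets. */
	xge_hal_rts_rth_set(hldev, 0, hash_type_bits, 6);
}
#endif /* XGE_HAL_USAGE_SKETCH */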
6989
6990 /**
6991 * xge_hal_rts_rth_start - Start RTS hashing.
6992 * @hldev: HAL device handle.
6993 *
6994  * Used to start RTS hashing.
6995  *
6996  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_stop().
6997 */
6998 void
6999 xge_hal_rts_rth_start(xge_hal_device_t *hldev)
7000 {
7001 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7002 u64 val64;
7003
7004
7005 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7006 &bar0->rts_rth_cfg);
7007 val64 |= XGE_HAL_RTS_RTH_EN;
7008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7009 &bar0->rts_rth_cfg);
7010 }
7011
7012 /**
7013 * xge_hal_rts_rth_stop - Stop the RTS hashing.
7014 * @hldev: HAL device handle.
7015 *
7016  * Used to stop RTS hashing.
7017  *
7018  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start().
7019 */
7020 void
7021 xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
7022 {
7023 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7024 u64 val64;
7025
7026 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7027 &bar0->rts_rth_cfg);
7028 val64 &= ~XGE_HAL_RTS_RTH_EN;
7029 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7030 &bar0->rts_rth_cfg);
7031 }
7032
7033 /**
7034 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
7035 * @hldev: HAL device handle.
7036 * @itable: Pointer to the indirection table
7037  * @itable_size: number of entries in the indirection table
7038  *
7039  * Used to set/configure the indirection table.
7040  * It enables the required number of entries in the IT and
7041  * programs each enabled entry with the supplied ring number.
7042 *
7043 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
7044 */
7045 xge_hal_status_e
7046 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
7047 {
7048 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7049 u64 val64;
7050 u32 idx;
7051
7052 for (idx = 0; idx < itable_size; idx++) {
7053 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
7054 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
7055
7056 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7057 &bar0->rts_rth_map_mem_data);
7058
7059 /* execute */
7060 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
7061 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
7062 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
7063 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7064 &bar0->rts_rth_map_mem_ctrl);
7065
7066 /* poll until done */
7067 if (__hal_device_register_poll(hldev,
7068 &bar0->rts_rth_map_mem_ctrl, 0,
7069 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
7070 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
7071 /* upper layer may require to repeat */
7072 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7073 }
7074 }
7075
7076 return XGE_HAL_OK;
7077 }
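
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define): spread the hash buckets round-robin
 * across the receive rings, repeat the table write if the strobe is
 * still executing (as the comment above suggests), then enable hashing.
 * The 64-entry table (matching a bucket size of 6), the retry bound and
 * the example_ name are assumptions; xge_hal_rts_rth_start() is assumed
 * to be declared in xgehal-device.h.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static xge_hal_status_e
example_rth_itable_configure(xge_hal_device_t *hldev, int nrings)
{
	u8 itable[64];	/* one entry per bucket: 2^6 = 64 for bucket size 6 */
	u32 i;
	int tries;
	xge_hal_status_e status = XGE_HAL_OK;

	/* Round-robin the buckets over the rings (nrings must be >= 1). */
	for (i = 0; i < sizeof(itable); i++)
		itable[i] = (u8)(i % nrings);

	/* The strobe may still be executing; repeat a bounded number of times. */
	for (tries = 0; tries < 3; tries++) {
		status = xge_hal_rts_rth_itable_set(hldev, itable,
		    (u32)sizeof(itable));
		if (status == XGE_HAL_OK)
			break;
	}

	if (status == XGE_HAL_OK)
		xge_hal_rts_rth_start(hldev);

	return status;
}
#endif /* XGE_HAL_USAGE_SKETCH */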
7078
7079
7080 /**
7081  * xge_hal_device_rts_rth_key_set - Configure the 40-byte secret for hash calculation.
7082  *
7083  * @hldev: HAL device handle.
7084  * @KeySize: Number of 64-bit words in @Key.
7085  * @Key: up to 40-byte array of 8-bit values.
7086 * This function configures the 40-byte secret which is used for hash
7087 * calculation.
7088 *
7089 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
7090 */
7091 void
7092 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
7093 {
7094 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
7095 u64 val64;
7096 u32 entry, nreg, i;
7097
7098 entry = 0;
7099 nreg = 0;
7100
7101 while( KeySize ) {
7102 val64 = 0;
7103 for ( i = 0; i < 8 ; i++) {
7104 /* Prepare 64-bit word for 'nreg' containing 8 key bytes. */
7105 if (i)
7106 val64 <<= 8;
7107 val64 |= Key[entry++];
7108 }
7109
7110 KeySize--;
7111
7112 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
7113 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7114 &bar0->rts_rth_hash_mask[nreg++]);
7115 }
7116
7117 while( nreg < 5 ) {
7118 /* Clear the rest if key is less than 40 bytes */
7119 val64 = 0;
7120 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7121 &bar0->rts_rth_hash_mask[nreg++]);
7122 }
7123 }
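
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define): KeySize counts 64-bit words, so a full
 * 40-byte secret is passed with KeySize = 5; shorter keys are
 * zero-padded by the trailing loop above.  The placeholder key pattern
 * and the example_ name are assumptions; a real driver would use a
 * randomly generated secret.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static void
example_rth_key_configure(xge_hal_device_t *hldev)
{
	u8 key[40];
	u32 i;

	/* Placeholder pattern only; not a real secret. */
	for (i = 0; i < sizeof(key); i++)
		key[i] = (u8)(i + 1);

	/* 40 bytes == five 64-bit words. */
	xge_hal_device_rts_rth_key_set(hldev, 5, key);
}
#endif /* XGE_HAL_USAGE_SKETCH */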
7124
7125
7126 /**
7127  * xge_hal_device_is_closed - Check whether the device is closed.
7128 *
7129 * @devh: HAL device handle.
7130 */
7131 int
7132 xge_hal_device_is_closed(xge_hal_device_h devh)
7133 {
7134 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7135
7136 if (xge_list_is_empty(&hldev->fifo_channels) &&
7137 xge_list_is_empty(&hldev->ring_channels))
7138 return 1;
7139
7140 return 0;
7141 }
7142
7143 xge_hal_status_e
7144 xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
7145 {
7146 u64 val64;
7147 int section;
7148 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
7149
7150 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7151 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7152
7153 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
7154 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
7155
7156 if ( index >= max_addr )
7157 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
7158
7159 /*
7160 * Calculate the section value
7161 */
7162 section = index / 32;
7163
7164 xge_debug_device(XGE_TRACE, "the Section value is %d ", section);
7165
7166 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7167 &bar0->rts_mac_cfg);
7168 switch(section)
7169 {
7170 case 0:
7171 val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
7172 break;
7173 case 1:
7174 val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
7175 break;
7176 case 2:
7177 val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
7178 break;
7179 case 3:
7180 val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
7181 break;
7182 case 4:
7183 val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
7184 break;
7185 case 5:
7186 val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
7187 break;
7188 case 6:
7189 val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
7190 break;
7191 case 7:
7192 val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
7193 break;
7194 default:
7195 xge_debug_device(XGE_ERR, "Invalid Section value %d "
7196 , section);
7197 }
7198
7199 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
7200 val64, &bar0->rts_mac_cfg);
7201 return XGE_HAL_OK;
7202 }
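
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define): each RTS MAC section covers 32 address
 * offsets, so index 40 maps to section 40 / 32 = 1 and enables
 * XGE_HAL_RTS_MAC_SECT1_EN.  The example_ name is an assumption; the
 * MAC address itself would be programmed separately.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static xge_hal_status_e
example_enable_mac_index(xge_hal_device_h devh, int index)
{
	xge_hal_status_e status;

	/* Enable the section covering this index before using it. */
	status = xge_hal_device_rts_section_enable(devh, index);
	if (status != XGE_HAL_OK)
		return status;	/* e.g. XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES */

	return XGE_HAL_OK;
}
#endif /* XGE_HAL_USAGE_SKETCH */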
7203
7204 /*
7205 * xge_hal_fix_rldram_ecc_error
7206  * @hldev: HAL device handle.
7207  *
7208  * SXE-02-010. This function turns OFF ECC error reporting for the
7209  * interface between the external Micron RLDRAM II device and the memory
7210  * controller. The error would have been reported in the RLD_ECC_DB_ERR_L and
7211  * RLD_ECC_DB_ERR_U fields of the MC_ERR_REG register. Issue reported by
7212  * HP-UX folks during the qualification of Herc.
7213 */
7214 xge_hal_status_e
7215 xge_hal_fix_rldram_ecc_error(xge_hal_device_t *hldev)
7216 {
7217 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
7218 u64 val64;
7219
7220 /* Enter test mode. */
7221 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE;
7222 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7223 &bar0->mc_rldram_test_ctrl);
7224
7225 /* Enable fg/bg tests. */
7226 val64 = 0x0100000000000000ULL;
7227 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7228 &bar0->mc_driver);
7229
7230 /* Enable RLDRAM configuration. */
7231 val64 = 0x0000000000017B00ULL;
7232 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7233 &bar0->mc_rldram_mrs);
7234
7235 /* Enable RLDRAM queues. */
7236 val64 = 0x0000000001017B00ULL;
7237 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7238 &bar0->mc_rldram_mrs);
7239
7240 /* Set up test ranges. */
7241 val64 = 0x00000000001E0100ULL;
7242 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7243 &bar0->mc_rldram_test_add);
7244
7245 val64 = 0x00000100001F0100ULL;
7246 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7247 &bar0->mc_rldram_test_add_bkg);
7248
7249 /* Start reads. */
7250 val64 = 0x0001000000010000ULL;
7251 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7252 &bar0->mc_rldram_test_ctrl);
7253
7254 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1,
7255 XGE_HAL_MC_RLDRAM_TEST_DONE,
7256 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
7257 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7258 }
7259
7260 /* Exit test mode. */
7261 val64 = 0x0000000000000000ULL;
7262 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7263 &bar0->mc_rldram_test_ctrl);
7264
7265 return XGE_HAL_OK;
7266 }
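
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define): the SXE-02-010 workaround applies to
 * Xframe II (Herc) adapters, so apply it only on those and retry a few
 * times if the RLDRAM test strobe has not completed.  The example_ name
 * and the retry bound are assumptions for this sketch.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static void
example_apply_sxe_02_010(xge_hal_device_t *hldev)
{
	int tries;

	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_HERC)
		return;

	for (tries = 0; tries < 3; tries++) {
		if (xge_hal_fix_rldram_ecc_error(hldev) == XGE_HAL_OK)
			break;
	}
}
#endif /* XGE_HAL_USAGE_SKETCH */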
7267
7268 /*
7269 * xge_hal_device_quiesce
7270 * @hldev: HAL device object
7271  * @devh: HAL device handle
7272 *
7273 * This is called by xge_quiesce to quiesce the device.
7274 */
7275 void
7276 xge_hal_device_quiesce(xge_hal_device_t *hldev, xge_hal_device_h devh)
7277 {
7278 /* Turn off debugging */
7279 g_xge_hal_driver->debug_level = XGE_NONE;
7280 g_level = &(g_xge_hal_driver->debug_level);
7281
7282 /* Disable device */
7283 (void) xge_hal_device_disable(devh);
7284
7285 /* Disable Xframe interrupts */
7286 xge_hal_device_intr_disable(devh);
7287 }
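
/*
 * Usage sketch (illustrative only, guarded by the hypothetical
 * XGE_HAL_USAGE_SKETCH define): in the ULD quiesce path the same device
 * object is passed both as the typed pointer and as the opaque handle.
 * The example_ name is an assumption for this sketch.
 */
#ifdef XGE_HAL_USAGE_SKETCH
static void
example_quiesce(xge_hal_device_t *hldev)
{
	xge_hal_device_quiesce(hldev, (xge_hal_device_h)hldev);
}
#endif /* XGE_HAL_USAGE_SKETCH */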
7288