/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002-2005 Neterion, Inc.
 * All rights reserved.
 *
 * FileName : xgehal-device.c
 *
 * Description: HAL device object functionality
 *
 * Created: 10 May 2004
 */

#include "xgehal-device.h"
#include "xgehal-channel.h"
#include "xgehal-fifo.h"
#include "xgehal-ring.h"
#include "xgehal-driver.h"
#include "xgehal-mgmt.h"

#define SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN	0x0

#ifdef XGE_HAL_HERC_EMULATION
#undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
#endif

/*
 * Jenkins hash key length (in bytes)
 */
#define XGE_HAL_JHASH_MSG_LEN 50

/*
 * mix(a,b,c) used in the Jenkins hash algorithm
 */
#define mix(a,b,c) { \
	a -= b; a -= c; a ^= (c>>13); \
	b -= c; b -= a; b ^= (a<<8); \
	c -= a; c -= b; c ^= (b>>13); \
	a -= b; a -= c; a ^= (c>>12); \
	b -= c; b -= a; b ^= (a<<16); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>3); \
	b -= c; b -= a; b ^= (a<<10); \
	c -= a; c -= b; c ^= (b>>15); \
}

extern xge_hal_driver_t *g_xge_hal_driver;

/*
 * __hal_device_event_queued
 * @data: pointer to xge_hal_device_t structure
 *
 * Will be called when a new event is successfully queued.
 */
void
__hal_device_event_queued(void *data, int event_type)
{
	xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
	if (g_xge_hal_driver->uld_callbacks.event_queued) {
		g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
	}
}

/*
 * __hal_pio_mem_write32_upper
 *
 * Endian-aware implementation of xge_os_pio_mem_write32().
 * Since Xframe has 64bit registers, we differentiate between the upper
 * and lower parts.
 */
void
__hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
{
#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
	xge_os_pio_mem_write32(pdev, regh, val, addr);
#else
	xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
#endif
}
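
/*
 * Illustrative sketch (not part of the HAL source): how the helper above
 * and __hal_pio_mem_write32_lower() below are combined to program one
 * 64-bit register with two 32-bit PIO writes, as done later in this file
 * for dtx_control. The value used here is an arbitrary placeholder.
 */
#if 0
static void
example_split_64bit_write(xge_hal_device_t *hldev, xge_hal_pci_bar0_t *bar0)
{
	u64 val64 = 0x8000051500000000ULL;	/* placeholder value */

	/* upper 32 bits first ... */
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
			(u32)(val64 >> 32), &bar0->dtx_control);
	/* ... then the lower 32 bits; each helper applies the +4 byte
	 * offset on the side required by the host endianness. */
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
			(u32)val64, &bar0->dtx_control);
}
#endif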
/*
 * __hal_pio_mem_write32_lower
 *
 * Endian-aware implementation of xge_os_pio_mem_write32().
 * Since Xframe has 64bit registers, we differentiate between the upper
 * and lower parts.
 */
void
__hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
		void *addr)
{
#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
	xge_os_pio_mem_write32(pdev, regh, val,
			(void *) ((char *)addr + 4));
#else
	xge_os_pio_mem_write32(pdev, regh, val, addr);
#endif
}

/*
 * __hal_device_register_poll
 * @hldev: pointer to xge_hal_device_t structure
 * @reg: register to poll for
 * @op: 0 - bit reset, 1 - bit set
 * @mask: mask for logical "and" condition based on %op
 * @max_millis: maximum time to try to poll in milliseconds
 *
 * Will poll a certain register for the specified amount of time.
 * Returns XGE_HAL_OK once the masked bits reach the state requested by
 * %op (cleared for op == 0, all set for op == 1), or XGE_HAL_FAIL if
 * they do not within %max_millis milliseconds.
 */
xge_hal_status_e
__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
			   int op, u64 mask, int max_millis)
{
	xge_hal_status_e ret = XGE_HAL_FAIL;
	u64 val64;
	int i = 0;

	do {
		xge_os_udelay(1000);
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
		if (op == 0 && !(val64 & mask)) {
			ret = XGE_HAL_OK;
			break;
		} else if (op == 1 && (val64 & mask) == mask) {
			ret = XGE_HAL_OK;
			break;
		}
	} while (++i <= max_millis);

	return ret;
}
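
/*
 * Illustrative sketch (not part of the HAL source): typical usage of
 * __hal_device_register_poll(). op == 1 waits for every bit in the mask
 * to become set, op == 0 waits for them to clear. The register and the
 * constants are taken from their later use in this file and serve only
 * as an example of the pattern.
 */
#if 0
static xge_hal_status_e
example_wait_for_cmd_strobe_clear(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* wait (op == 0) for the strobe bit to clear, giving up after
	 * the command-memory timeout */
	return __hal_device_register_poll(hldev, &bar0->rti_command_mem, 0,
			XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
			XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
}
#endif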
/*
 * __hal_device_wait_quiescent
 * @hldev: the device
 * @hw_status: hw_status in case of error
 *
 * Will wait until the device is quiescent for some blocks.
 */
static xge_hal_status_e
__hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* poll and wait first */
#ifdef XGE_HAL_HERC_EMULATION
	(void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
			(XGE_HAL_ADAPTER_STATUS_TDMA_READY |
			 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
			 XGE_HAL_ADAPTER_STATUS_PFC_READY |
			 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
			 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
			 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
			 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
			 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
			XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
#else
	(void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
			(XGE_HAL_ADAPTER_STATUS_TDMA_READY |
			 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
			 XGE_HAL_ADAPTER_STATUS_PFC_READY |
			 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
			 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
			 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
			 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
			 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
			 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
			XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
#endif

	return xge_hal_device_status(hldev, hw_status);
}

/**
 * xge_hal_device_is_slot_freeze
 * @hldev: the device
 *
 * Returns non-zero if the slot is frozen.
 * The determination is made based on the adapter_status
 * register, which will never read as all FFs unless the PCI read
 * cannot go through.
 */
int
xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u16 device_id;
	u64 adapter_status =
		xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->adapter_status);
	u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->pif_rd_swapper_fb);
	xge_os_pci_read16(hldev->pdev,hldev->cfgh,
			xge_offsetof(xge_hal_pci_config_le_t, device_id),
			&device_id);
#ifdef TX_DEBUG
	if (adapter_status == XGE_HAL_ALL_FOXES &&
	    val64 == XGE_HAL_ALL_FOXES)
	{
		u64 dummy;
		dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->pcc_enable);
		printf(">>> Slot is frozen!\n");
		brkpoint(0);
	}
#endif
	return ((adapter_status == XGE_HAL_ALL_FOXES &&
		 val64 == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
}


/*
 * __hal_device_led_actifity_fix
 * @hldev: pointer to xge_hal_device_t structure
 *
 * SXE-002: Configure link and activity LED to turn it off
 */
static void
__hal_device_led_actifity_fix(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u16 subid;
	u64 val64;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
		xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);

	/*
	 * In the case of Herc, a new register named beacon control was
	 * added which was not present in Xena.
	 * The beacon control register in Herc is at the same offset as
	 * the gpio control register in Xena, so the two are one and the
	 * same in the case of Xena. Also, the gpio control register offset
	 * differs between Herc and Xena.
	 * The current register map represents Herc (i.e. it contains
	 * both the beacon and gpio control registers).
	 * WRT the transition from Xena to Herc, all the Xena code that was
	 * using the gpio control register for LED handling has to use the
	 * beacon control register on Herc, while the rest of the code that
	 * uses gpio control on Xena keeps using the same register on Herc.
	 * WRT LED handling (the following code), the beacon control
	 * register has to be used in the case of Herc. This is applicable
	 * for Xena as well, since it represents the gpio control register
	 * in Xena.
	 */
	if ((subid & 0xFF) >= 0x07) {
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->beacon_control);
		val64 |= 0x0000800000000000ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				val64, &bar0->beacon_control);
		val64 = 0x0411040400000000ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				(void *) ((u8 *)bar0 + 0x2700));
	}
}

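/*
 * Illustrative sketch (not part of the HAL source): the register
 * programming pattern used below for xena_fix_mac[] and, later, for the
 * XAUI dtx/mdio tables. A u64 array is terminated by END_SIGN and the
 * writer simply streams the values into one register with a short delay
 * between writes. The helper name and its parameters are hypothetical;
 * the target register would be chosen by the caller.
 */
#if 0
static void
example_write_sign_terminated_table(xge_hal_device_t *hldev,
		const u64 *table, u64 *reg)
{
	int i = 0;

	while (table[i] != END_SIGN) {
		/* push the next canned value into the register */
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				table[i++], (void *)reg);
		xge_os_mdelay(1);	/* give the hardware time to latch */
	}
}
#endif
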
/* Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static u64 xena_fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

/*
 * __hal_device_xena_fix_mac
 * @hldev: HAL device handle.
 *
 * Fix for all "FFs" MAC address problems observed on Alpha platforms.
 */
static void
__hal_device_xena_fix_mac(xge_hal_device_t *hldev)
{
	int i = 0;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/*
	 * In the case of Herc, a new register named beacon control was
	 * added which was not present in Xena.
	 * The beacon control register in Herc is at the same offset as
	 * the gpio control register in Xena, so the two are one and the
	 * same in the case of Xena. Also, the gpio control register offset
	 * differs between Herc and Xena.
	 * The current register map represents Herc (i.e. it contains
	 * both the beacon and gpio control registers).
	 * WRT the transition from Xena to Herc, all the Xena code that was
	 * using the gpio control register for LED handling has to use the
	 * beacon control register on Herc, while the rest of the code that
	 * uses gpio control on Xena keeps using the same register on Herc.
	 * In the following code (xena_fix_mac), the beacon control register
	 * has to be used in the case of Xena, since it represents the gpio
	 * control register. In the case of Herc, no change is required.
	 */
	while (xena_fix_mac[i] != END_SIGN) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				xena_fix_mac[i++], &bar0->beacon_control);
		xge_os_mdelay(1);
	}
}

/*
 * xge_hal_device_bcast_enable
 * @hldev: HAL device handle.
 *
 * Enable receiving broadcasts.
 * The host must first write the RMAC_CFG_KEY "key"
 * register, and then the MAC_CFG register.
 */
void
xge_hal_device_bcast_enable(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->mac_cfg);
	val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);

	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
		(u32)(val64 >> 32), &bar0->mac_cfg);

	xge_debug_device(XGE_TRACE, "mac_cfg 0x%llx: broadcast %s",
		(unsigned long long)val64,
		hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
}

/*
 * xge_hal_device_bcast_disable
 * @hldev: HAL device handle.
 *
 * Disable receiving broadcasts.
 * The host must first write the RMAC_CFG_KEY "key"
 * register, and then the MAC_CFG register.
375 */ 376 void 377 xge_hal_device_bcast_disable(xge_hal_device_h devh) 378 { 379 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 380 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 381 u64 val64; 382 383 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 384 &bar0->mac_cfg); 385 386 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE); 387 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 388 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 389 390 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 391 (u32)(val64 >> 32), &bar0->mac_cfg); 392 393 xge_debug_device(XGE_TRACE, "mac_cfg 0x%llx: broadcast %s", 394 (unsigned long long)val64, 395 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); 396 } 397 398 /* 399 * __hal_device_shared_splits_configure 400 * @hldev: HAL device handle. 401 * 402 * TxDMA will stop Read request if the number of read split had exceeded 403 * the limit set by shared_splits 404 */ 405 static void 406 __hal_device_shared_splits_configure(xge_hal_device_t *hldev) 407 { 408 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 409 u64 val64; 410 411 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 412 &bar0->pic_control); 413 val64 |= 414 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits); 415 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 416 &bar0->pic_control); 417 xge_debug_device(XGE_TRACE, "%s", "shared splits configured"); 418 } 419 420 /* 421 * __hal_device_rmac_padding_configure 422 * @hldev: HAL device handle. 423 * 424 * Configure RMAC frame padding. Depends on configuration, it 425 * can be send to host or removed by MAC. 426 */ 427 static void 428 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev) 429 { 430 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 431 u64 val64; 432 433 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 434 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 435 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 436 &bar0->mac_cfg); 437 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE ); 438 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE ); 439 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD; 440 441 /* 442 * If the RTH enable bit is not set, strip the FCS 443 */ 444 if (!hldev->config.rth_en || 445 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 446 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) { 447 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS; 448 } 449 450 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD ); 451 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM; 452 453 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 454 (u32)(val64 >> 32), (char*)&bar0->mac_cfg); 455 xge_os_mdelay(1); 456 457 xge_debug_device(XGE_TRACE, 458 "mac_cfg 0x%llx: frame padding configured", 459 (unsigned long long)val64); 460 } 461 462 /* 463 * __hal_device_pause_frames_configure 464 * @hldev: HAL device handle. 465 * 466 * Set Pause threshold. 
 *
 * A pause frame is generated if the amount of data outstanding
 * on any queue exceeds the ratio of
 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
 */
static void
__hal_device_pause_frames_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	int i;
	u64 val64;

	switch (hldev->config.mac.media) {
	case XGE_HAL_MEDIA_SR:
	case XGE_HAL_MEDIA_SW:
		val64=0xfffbfffbfffbfffbULL;
		break;
	case XGE_HAL_MEDIA_LR:
	case XGE_HAL_MEDIA_LW:
		val64=0xffbbffbbffbbffbbULL;
		break;
	case XGE_HAL_MEDIA_ER:
	case XGE_HAL_MEDIA_EW:
	default:
		val64=0xffbbffbbffbbffbbULL;
		break;
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			val64, &bar0->mc_pause_thresh_q0q3);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			val64, &bar0->mc_pause_thresh_q4q7);

	/* Set the time value to be inserted in the pause frame generated
	 * by Xframe */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->rmac_pause_cfg);
	if (hldev->config.mac.rmac_pause_gen_en)
		val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
	else
		val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
	if (hldev->config.mac.rmac_pause_rcv_en)
		val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
	else
		val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
	val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rmac_pause_cfg);

	val64 = 0;
	for (i = 0; i<4; i++) {
		val64 |=
		     (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
			<<(i*2*8));
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i<4; i++) {
		val64 |=
		     (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
			<<(i*2*8));
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->mc_pause_thresh_q4q7);
	xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
}

/*
 * Herc's clock rate is doubled, unless the bus runs at 33MHz.
 */
unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
				      unsigned int time_ival)
{
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
		return time_ival;

	xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);

	if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
	    hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
		time_ival *= 2;

	return time_ival;
}


/*
 * __hal_device_bus_master_disable
 * @hldev: HAL device handle.
 *
 * Disable bus mastership.
 */
static void
__hal_device_bus_master_disable (xge_hal_device_t *hldev)
{
	u16 cmd;
	u16 bus_master = 4;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
			xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
	cmd &= ~bus_master;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
			xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
}

/*
 * __hal_device_bus_master_enable
 * @hldev: HAL device handle.
 *
 * Enable bus mastership.
 */
static void
__hal_device_bus_master_enable (xge_hal_device_t *hldev)
{
	u16 cmd;
	u16 bus_master = 4;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
			xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);

	/* already enabled?
do nothing */ 591 if (cmd & bus_master) 592 return; 593 594 cmd |= bus_master; 595 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 596 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 597 } 598 /* 599 * __hal_device_intr_mgmt 600 * @hldev: HAL device handle. 601 * @mask: mask indicating which Intr block must be modified. 602 * @flag: if true - enable, otherwise - disable interrupts. 603 * 604 * Disable or enable device interrupts. Mask is used to specify 605 * which hardware blocks should produce interrupts. For details 606 * please refer to Xframe User Guide. 607 */ 608 static void 609 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag) 610 { 611 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 612 u64 val64 = 0, temp64 = 0; 613 u64 gim, gim_saved; 614 615 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev, 616 hldev->regh0, &bar0->general_int_mask); 617 618 /* Top level interrupt classification */ 619 /* PIC Interrupts */ 620 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) { 621 /* Enable PIC Intrs in the general intr mask register */ 622 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/; 623 if (flag) { 624 gim &= ~((u64) val64); 625 temp64 = xge_os_pio_mem_read64(hldev->pdev, 626 hldev->regh0, &bar0->pic_int_mask); 627 628 temp64 &= ~XGE_HAL_PIC_INT_TX; 629 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 630 if (xge_hal_device_check_id(hldev) == 631 XGE_HAL_CARD_HERC) { 632 temp64 &= ~XGE_HAL_PIC_INT_MISC; 633 } 634 #endif 635 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 636 temp64, &bar0->pic_int_mask); 637 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 638 if (xge_hal_device_check_id(hldev) == 639 XGE_HAL_CARD_HERC) { 640 /* 641 * Unmask only Link Up interrupt 642 */ 643 temp64 = xge_os_pio_mem_read64(hldev->pdev, 644 hldev->regh0, &bar0->misc_int_mask); 645 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 646 xge_os_pio_mem_write64(hldev->pdev, 647 hldev->regh0, temp64, 648 &bar0->misc_int_mask); 649 xge_debug_device(XGE_TRACE, 650 "unmask link up flag %llx", 651 (unsigned long long)temp64); 652 } 653 #endif 654 } else { /* flag == 0 */ 655 656 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 657 if (xge_hal_device_check_id(hldev) == 658 XGE_HAL_CARD_HERC) { 659 /* 660 * Mask both Link Up and Down interrupts 661 */ 662 temp64 = xge_os_pio_mem_read64(hldev->pdev, 663 hldev->regh0, &bar0->misc_int_mask); 664 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 665 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 666 xge_os_pio_mem_write64(hldev->pdev, 667 hldev->regh0, temp64, 668 &bar0->misc_int_mask); 669 xge_debug_device(XGE_TRACE, 670 "mask link up/down flag %llx", 671 (unsigned long long)temp64); 672 } 673 #endif 674 /* Disable PIC Intrs in the general intr mask 675 * register */ 676 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 677 XGE_HAL_ALL_INTRS_DIS, 678 &bar0->pic_int_mask); 679 gim |= val64; 680 } 681 } 682 683 /* DMA Interrupts */ 684 /* Enabling/Disabling Tx DMA interrupts */ 685 if (mask & XGE_HAL_TX_DMA_INTR) { 686 /* Enable TxDMA Intrs in the general intr mask register */ 687 val64 = XGE_HAL_TXDMA_INT_M; 688 if (flag) { 689 gim &= ~((u64) val64); 690 /* Disable all TxDMA interrupts */ 691 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 692 XGE_HAL_ALL_INTRS_DIS, 693 &bar0->txdma_int_mask); 694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 695 XGE_HAL_ALL_INTRS_DIS, 696 &bar0->pfc_err_mask); 697 698 } else { /* flag == 0 */ 699 700 /* Disable TxDMA Intrs in the general intr mask 701 * register */ 702 xge_os_pio_mem_write64(hldev->pdev, 
hldev->regh0, 703 XGE_HAL_ALL_INTRS_DIS, 704 &bar0->txdma_int_mask); 705 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 706 XGE_HAL_ALL_INTRS_DIS, 707 &bar0->pfc_err_mask); 708 709 gim |= val64; 710 } 711 } 712 713 /* Enabling/Disabling Rx DMA interrupts */ 714 if (mask & XGE_HAL_RX_DMA_INTR) { 715 /* Enable RxDMA Intrs in the general intr mask register */ 716 val64 = XGE_HAL_RXDMA_INT_M; 717 if (flag) { 718 719 gim &= ~((u64) val64); 720 /* All RxDMA block interrupts are disabled for now 721 * TODO */ 722 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 723 XGE_HAL_ALL_INTRS_DIS, 724 &bar0->rxdma_int_mask); 725 726 } else { /* flag == 0 */ 727 728 /* Disable RxDMA Intrs in the general intr mask 729 * register */ 730 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 731 XGE_HAL_ALL_INTRS_DIS, 732 &bar0->rxdma_int_mask); 733 734 gim |= val64; 735 } 736 } 737 738 /* MAC Interrupts */ 739 /* Enabling/Disabling MAC interrupts */ 740 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) { 741 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M; 742 if (flag) { 743 744 gim &= ~((u64) val64); 745 746 /* All MAC block error inter. are disabled for now. */ 747 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 748 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); 749 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 750 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); 751 752 } else { /* flag == 0 */ 753 754 /* Disable MAC Intrs in the general intr mask 755 * register */ 756 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 757 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); 758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 759 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); 760 761 gim |= val64; 762 } 763 } 764 765 /* XGXS Interrupts */ 766 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) { 767 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M; 768 if (flag) { 769 770 gim &= ~((u64) val64); 771 /* All XGXS block error interrupts are disabled for now 772 * TODO */ 773 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 774 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); 775 776 } else { /* flag == 0 */ 777 778 /* Disable MC Intrs in the general intr mask register */ 779 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 780 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); 781 782 gim |= val64; 783 } 784 } 785 786 /* Memory Controller(MC) interrupts */ 787 if (mask & XGE_HAL_MC_INTR) { 788 val64 = XGE_HAL_MC_INT_M; 789 if (flag) { 790 791 gim &= ~((u64) val64); 792 793 /* Enable all MC blocks error interrupts */ 794 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 795 0x0ULL, &bar0->mc_int_mask); 796 797 } else { /* flag == 0 */ 798 799 /* Disable MC Intrs in the general intr mask 800 * register */ 801 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 802 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask); 803 804 gim |= val64; 805 } 806 } 807 808 809 /* Tx traffic interrupts */ 810 if (mask & XGE_HAL_TX_TRAFFIC_INTR) { 811 val64 = XGE_HAL_TXTRAFFIC_INT_M; 812 if (flag) { 813 814 gim &= ~((u64) val64); 815 816 /* Enable all the Tx side interrupts */ 817 /* '0' Enables all 64 TX interrupt levels. */ 818 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, 819 &bar0->tx_traffic_mask); 820 821 } else { /* flag == 0 */ 822 823 /* Disable Tx Traffic Intrs in the general intr mask 824 * register. 
*/ 825 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 826 XGE_HAL_ALL_INTRS_DIS, 827 &bar0->tx_traffic_mask); 828 gim |= val64; 829 } 830 } 831 832 /* Rx traffic interrupts */ 833 if (mask & XGE_HAL_RX_TRAFFIC_INTR) { 834 val64 = XGE_HAL_RXTRAFFIC_INT_M; 835 if (flag) { 836 gim &= ~((u64) val64); 837 /* '0' Enables all 8 RX interrupt levels. */ 838 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, 839 &bar0->rx_traffic_mask); 840 841 } else { /* flag == 0 */ 842 843 /* Disable Rx Traffic Intrs in the general intr mask 844 * register. 845 */ 846 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 847 XGE_HAL_ALL_INTRS_DIS, 848 &bar0->rx_traffic_mask); 849 850 gim |= val64; 851 } 852 } 853 854 /* Sched Timer interrupt */ 855 if (mask & XGE_HAL_SCHED_INTR) { 856 if (flag) { 857 temp64 = xge_os_pio_mem_read64(hldev->pdev, 858 hldev->regh0, &bar0->txpic_int_mask); 859 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR; 860 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 861 temp64, &bar0->txpic_int_mask); 862 863 xge_hal_device_sched_timer(hldev, 864 hldev->config.sched_timer_us, 865 hldev->config.sched_timer_one_shot); 866 } else { 867 temp64 = xge_os_pio_mem_read64(hldev->pdev, 868 hldev->regh0, &bar0->txpic_int_mask); 869 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR; 870 871 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 872 temp64, &bar0->txpic_int_mask); 873 874 xge_hal_device_sched_timer(hldev, 875 XGE_HAL_SCHED_TIMER_DISABLED, 876 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE); 877 } 878 } 879 880 if (gim != gim_saved) { 881 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim, 882 &bar0->general_int_mask); 883 xge_debug_device(XGE_TRACE, "general_int_mask updated %llx => %llx", 884 (unsigned long long)gim_saved, (unsigned long long)gim); 885 } 886 } 887 888 /* 889 * __hal_device_rti_configure 890 * @hldev: HAL device handle. 891 * 892 * RTI Initialization. 893 * Initialize Receive Traffic Interrupt Scheme. 
894 */ 895 static xge_hal_status_e 896 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime) 897 { 898 xge_hal_pci_bar0_t *bar0; 899 u64 val64, data1 = 0, data2 = 0; 900 int i; 901 902 if (runtime) 903 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 904 else 905 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 906 907 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { 908 xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti; 909 910 if (!hldev->config.ring.queue[i].configured) 911 continue; 912 913 if (rti->timer_val_us) { 914 unsigned int rx_interval; 915 916 if (hldev->config.pci_freq_mherz) { 917 rx_interval = hldev->config.pci_freq_mherz * 918 rti->timer_val_us / 8; 919 rx_interval = 920 __hal_fix_time_ival_herc(hldev, 921 rx_interval); 922 } else { 923 rx_interval = rti->timer_val_us; 924 } 925 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval); 926 if (rti->timer_ac_en) { 927 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN; 928 } 929 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN; 930 } 931 932 if (rti->urange_a || 933 rti->urange_b || 934 rti->urange_c || 935 rti->ufc_a || 936 rti->ufc_b || 937 rti->ufc_c || 938 rti->ufc_d) { 939 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) | 940 XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) | 941 XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c); 942 943 data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) | 944 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) | 945 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) | 946 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d); 947 } 948 949 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, 950 &bar0->rti_data1_mem); 951 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, 952 &bar0->rti_data2_mem); 953 xge_os_wmb(); 954 955 val64 = XGE_HAL_RTI_CMD_MEM_WE | 956 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD; 957 val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i); 958 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 959 &bar0->rti_command_mem); 960 961 if (!runtime && __hal_device_register_poll(hldev, 962 &bar0->rti_command_mem, 0, 963 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD, 964 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 965 /* upper layer may require to repeat */ 966 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 967 } 968 } 969 970 if (!runtime) { 971 xge_debug_device(XGE_TRACE, 972 "RTI configured: rti_data1_mem 0x%llx", 973 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, 974 hldev->regh0, &bar0->rti_data1_mem)); 975 } 976 977 return XGE_HAL_OK; 978 } 979 980 /* 981 * __hal_device_tti_configure 982 * @hldev: HAL device handle. 983 * 984 * TTI Initialization. 985 * Initialize Transmit Traffic Interrupt Scheme. 
986 */ 987 static xge_hal_status_e 988 __hal_device_tti_configure(xge_hal_device_t *hldev) 989 { 990 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 991 xge_hal_tti_config_t *tti = &hldev->config.tti; 992 u64 val64, data1 = 0, data2 = 0; 993 994 if (tti->timer_val_us) { 995 unsigned int tx_interval; 996 997 if (hldev->config.pci_freq_mherz) { 998 tx_interval = hldev->config.pci_freq_mherz * 999 tti->timer_val_us / 64; 1000 tx_interval = 1001 __hal_fix_time_ival_herc(hldev, 1002 tx_interval); 1003 } else { 1004 tx_interval = tti->timer_val_us; 1005 } 1006 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval); 1007 if (tti->timer_ac_en) { 1008 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN; 1009 } 1010 if (tti->timer_ci_en) { 1011 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN; 1012 } 1013 1014 xge_debug_device(XGE_TRACE, "TTI timer enabled to %d, ci %s", 1015 tx_interval, tti->timer_ci_en ? 1016 "enabled": "disabled"); 1017 } 1018 1019 if (tti->urange_a || 1020 tti->urange_b || 1021 tti->urange_c || 1022 tti->ufc_a || 1023 tti->ufc_b || 1024 tti->ufc_c || 1025 tti->ufc_d ) { 1026 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) | 1027 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) | 1028 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c); 1029 1030 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) | 1031 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) | 1032 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) | 1033 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d); 1034 1035 xge_debug_device(XGE_TRACE, "%s", "TTI utiliz. enabled"); 1036 } 1037 1038 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, 1039 &bar0->tti_data1_mem); 1040 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, 1041 &bar0->tti_data2_mem); 1042 1043 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD; 1044 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1045 &bar0->tti_command_mem); 1046 1047 if (__hal_device_register_poll(hldev, &bar0->tti_command_mem, 0, 1048 XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD, 1049 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1050 /* upper layer may require to repeat */ 1051 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1052 } 1053 1054 xge_debug_device(XGE_TRACE, "TTI configured: tti_data1_mem 0x%llx", 1055 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, 1056 hldev->regh0, 1057 &bar0->tti_data1_mem)); 1058 1059 return XGE_HAL_OK; 1060 } 1061 1062 1063 /* Constants to be programmed into the Xena's registers to configure 1064 * the XAUI. 
*/ 1065 static u64 default_xena_mdio_cfg[] = { 1066 /* Reset PMA PLL */ 1067 0xC001010000000000ULL, 0xC0010100000000E0ULL, 1068 0xC0010100008000E4ULL, 1069 /* Remove Reset from PMA PLL */ 1070 0xC001010000000000ULL, 0xC0010100000000E0ULL, 1071 0xC0010100000000E4ULL, 1072 END_SIGN 1073 }; 1074 1075 static u64 default_herc_mdio_cfg[] = { 1076 END_SIGN 1077 }; 1078 1079 static u64 default_xena_dtx_cfg[] = { 1080 0x8000051500000000ULL, 0x80000515000000E0ULL, 1081 0x80000515D93500E4ULL, 0x8001051500000000ULL, 1082 0x80010515000000E0ULL, 0x80010515001E00E4ULL, 1083 0x8002051500000000ULL, 0x80020515000000E0ULL, 1084 0x80020515F21000E4ULL, 1085 /* Set PADLOOPBACKN */ 1086 0x8002051500000000ULL, 0x80020515000000E0ULL, 1087 0x80020515B20000E4ULL, 0x8003051500000000ULL, 1088 0x80030515000000E0ULL, 0x80030515B20000E4ULL, 1089 0x8004051500000000ULL, 0x80040515000000E0ULL, 1090 0x80040515B20000E4ULL, 0x8005051500000000ULL, 1091 0x80050515000000E0ULL, 0x80050515B20000E4ULL, 1092 SWITCH_SIGN, 1093 /* Remove PADLOOPBACKN */ 1094 0x8002051500000000ULL, 0x80020515000000E0ULL, 1095 0x80020515F20000E4ULL, 0x8003051500000000ULL, 1096 0x80030515000000E0ULL, 0x80030515F20000E4ULL, 1097 0x8004051500000000ULL, 0x80040515000000E0ULL, 1098 0x80040515F20000E4ULL, 0x8005051500000000ULL, 1099 0x80050515000000E0ULL, 0x80050515F20000E4ULL, 1100 END_SIGN 1101 }; 1102 1103 /* 1104 static u64 default_herc_dtx_cfg[] = { 1105 0x80000515BA750000ULL, 0x80000515BA7500E0ULL, 1106 0x80000515BA750004ULL, 0x80000515BA7500E4ULL, 1107 0x80010515003F0000ULL, 0x80010515003F00E0ULL, 1108 0x80010515003F0004ULL, 0x80010515003F00E4ULL, 1109 0x80020515F2100000ULL, 0x80020515F21000E0ULL, 1110 0x80020515F2100004ULL, 0x80020515F21000E4ULL, 1111 END_SIGN 1112 }; 1113 */ 1114 1115 static u64 default_herc_dtx_cfg[] = { 1116 0x8000051536750000ULL, 0x80000515367500E0ULL, 1117 0x8000051536750004ULL, 0x80000515367500E4ULL, 1118 1119 0x80010515003F0000ULL, 0x80010515003F00E0ULL, 1120 0x80010515003F0004ULL, 0x80010515003F00E4ULL, 1121 1122 0x801205150D440000ULL, 0x801205150D4400E0ULL, 1123 0x801205150D440004ULL, 0x801205150D4400E4ULL, 1124 1125 0x80020515F2100000ULL, 0x80020515F21000E0ULL, 1126 0x80020515F2100004ULL, 0x80020515F21000E4ULL, 1127 END_SIGN 1128 }; 1129 1130 /* 1131 * __hal_device_xaui_configure 1132 * @hldev: HAL device handle. 1133 * 1134 * Configure XAUI Interface of Xena. 1135 * 1136 * To Configure the Xena's XAUI, one has to write a series 1137 * of 64 bit values into two registers in a particular 1138 * sequence. Hence a macro 'SWITCH_SIGN' has been defined 1139 * which will be defined in the array of configuration values 1140 * (default_dtx_cfg & default_mdio_cfg) at appropriate places 1141 * to switch writing from one regsiter to another. We continue 1142 * writing these values until we encounter the 'END_SIGN' macro. 1143 * For example, After making a series of 21 writes into 1144 * dtx_control register the 'SWITCH_SIGN' appears and hence we 1145 * start writing into mdio_control until we encounter END_SIGN. 
1146 */ 1147 static void 1148 __hal_device_xaui_configure(xge_hal_device_t *hldev) 1149 { 1150 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1151 int mdio_cnt = 0, dtx_cnt = 0; 1152 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL; 1153 1154 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 1155 default_dtx_cfg = default_xena_dtx_cfg; 1156 default_mdio_cfg = default_xena_mdio_cfg; 1157 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 1158 default_dtx_cfg = default_herc_dtx_cfg; 1159 default_mdio_cfg = default_herc_mdio_cfg; 1160 } 1161 xge_assert(default_dtx_cfg); 1162 1163 do { 1164 dtx_cfg: 1165 while (default_dtx_cfg[dtx_cnt] != END_SIGN) { 1166 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) { 1167 dtx_cnt++; 1168 goto mdio_cfg; 1169 } 1170 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 1171 (u32)(default_dtx_cfg[dtx_cnt]>>32), 1172 &bar0->dtx_control); 1173 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, 1174 (u32)default_dtx_cfg[dtx_cnt], 1175 &bar0->dtx_control); 1176 xge_os_wmb(); 1177 xge_os_mdelay(1); 1178 dtx_cnt++; 1179 } 1180 mdio_cfg: 1181 while (default_mdio_cfg[mdio_cnt] != END_SIGN) { 1182 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { 1183 mdio_cnt++; 1184 goto dtx_cfg; 1185 } 1186 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 1187 (u32)(default_mdio_cfg[mdio_cnt]>>32), 1188 &bar0->mdio_control); 1189 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, 1190 (u32)default_mdio_cfg[mdio_cnt], 1191 &bar0->mdio_control); 1192 xge_os_wmb(); 1193 xge_os_mdelay(1); 1194 mdio_cnt++; 1195 } 1196 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) && 1197 (default_mdio_cfg[mdio_cnt] == END_SIGN)) ); 1198 1199 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured"); 1200 } 1201 1202 /* 1203 * __hal_device_mac_link_util_set 1204 * @hldev: HAL device handle. 1205 * 1206 * Set sampling rate to calculate link utilization. 1207 */ 1208 static void 1209 __hal_device_mac_link_util_set(xge_hal_device_t *hldev) 1210 { 1211 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1212 u64 val64; 1213 1214 val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL( 1215 hldev->config.mac.tmac_util_period) | 1216 XGE_HAL_MAC_RX_LINK_UTIL_VAL( 1217 hldev->config.mac.rmac_util_period); 1218 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1219 &bar0->mac_link_util); 1220 xge_debug_device(XGE_TRACE, "%s", 1221 "bandwidth link utilization configured"); 1222 } 1223 1224 /* 1225 * __hal_device_set_swapper 1226 * @hldev: HAL device handle. 1227 * 1228 * Set the Xframe's byte "swapper" in accordance with 1229 * endianness of the host. 1230 */ 1231 xge_hal_status_e 1232 __hal_device_set_swapper(xge_hal_device_t *hldev) 1233 { 1234 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1235 u64 val64; 1236 1237 /* 1238 * from 32bit errarta: 1239 * 1240 * The SWAPPER_CONTROL register determines how the adapter accesses 1241 * host memory as well as how it responds to read and write requests 1242 * from the host system. Writes to this register should be performed 1243 * carefully, since the byte swappers could reverse the order of bytes. 1244 * When configuring this register keep in mind that writes to the PIF 1245 * read and write swappers could reverse the order of the upper and 1246 * lower 32-bit words. This means that the driver may have to write 1247 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to 1248 * configure the entire register. 
*/ 1249 1250 /* 1251 * The device by default set to a big endian format, so a big endian 1252 * driver need not set anything. 1253 */ 1254 1255 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER) 1256 1257 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1258 0xffffffffffffffffULL, &bar0->swapper_ctrl); 1259 1260 val64 = XGE_HAL_CUSTOM_HW_SWAPPER; 1261 1262 xge_os_wmb(); 1263 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1264 &bar0->swapper_ctrl); 1265 1266 xge_debug_device(XGE_TRACE, "using custom HW swapper 0x%llx", 1267 (unsigned long long)val64); 1268 1269 #elif !defined(XGE_OS_HOST_BIG_ENDIAN) 1270 1271 /* 1272 * Initially we enable all bits to make it accessible by the driver, 1273 * then we selectively enable only those bits that we want to set. 1274 * i.e. force swapper to swap for the first time since second write 1275 * will overwrite with the final settings. 1276 * 1277 * Use only for little endian platforms. 1278 */ 1279 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1280 0xffffffffffffffffULL, &bar0->swapper_ctrl); 1281 xge_os_wmb(); 1282 val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE | 1283 XGE_HAL_SWAPPER_CTRL_PIF_R_SE | 1284 XGE_HAL_SWAPPER_CTRL_PIF_W_FE | 1285 XGE_HAL_SWAPPER_CTRL_PIF_W_SE | 1286 XGE_HAL_SWAPPER_CTRL_RTH_FE | 1287 XGE_HAL_SWAPPER_CTRL_RTH_SE | 1288 XGE_HAL_SWAPPER_CTRL_TXP_FE | 1289 XGE_HAL_SWAPPER_CTRL_TXP_SE | 1290 XGE_HAL_SWAPPER_CTRL_TXD_R_FE | 1291 XGE_HAL_SWAPPER_CTRL_TXD_R_SE | 1292 XGE_HAL_SWAPPER_CTRL_TXD_W_FE | 1293 XGE_HAL_SWAPPER_CTRL_TXD_W_SE | 1294 XGE_HAL_SWAPPER_CTRL_TXF_R_FE | 1295 XGE_HAL_SWAPPER_CTRL_RXD_R_FE | 1296 XGE_HAL_SWAPPER_CTRL_RXD_R_SE | 1297 XGE_HAL_SWAPPER_CTRL_RXD_W_FE | 1298 XGE_HAL_SWAPPER_CTRL_RXD_W_SE | 1299 XGE_HAL_SWAPPER_CTRL_RXF_W_FE | 1300 XGE_HAL_SWAPPER_CTRL_XMSI_FE | 1301 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE); 1302 /* 1303 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 1304 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE; 1305 } */ 1306 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64, 1307 &bar0->swapper_ctrl); 1308 xge_os_wmb(); 1309 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32), 1310 &bar0->swapper_ctrl); 1311 xge_os_wmb(); 1312 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32), 1313 &bar0->swapper_ctrl); 1314 xge_debug_device(XGE_TRACE, "%s", "using little endian set"); 1315 #endif 1316 1317 /* Verifying if endian settings are accurate by reading a feedback 1318 * register. */ 1319 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1320 &bar0->pif_rd_swapper_fb); 1321 if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) { 1322 xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read %llx", 1323 (unsigned long long) val64); 1324 return XGE_HAL_ERR_SWAPPER_CTRL; 1325 } 1326 1327 xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled"); 1328 1329 return XGE_HAL_OK; 1330 } 1331 1332 /* 1333 * __hal_device_rts_mac_configure - Configure RTS steering based on 1334 * destination mac address. 1335 * @hldev: HAL device handle. 1336 * 1337 */ 1338 xge_hal_status_e 1339 __hal_device_rts_mac_configure(xge_hal_device_t *hldev) 1340 { 1341 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1342 u64 val64; 1343 1344 if (!hldev->config.rts_mac_en) { 1345 return XGE_HAL_OK; 1346 } 1347 1348 /* 1349 * Set the receive traffic steering mode from default(classic) 1350 * to enhanced. 
1351 */ 1352 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1353 &bar0->rts_ctrl); 1354 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1355 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1356 val64, &bar0->rts_ctrl); 1357 return XGE_HAL_OK; 1358 } 1359 1360 /* 1361 * xge__hal_device_rts_mac_enable 1362 * 1363 * @devh: HAL device handle. 1364 * @index: index number where the MAC addr will be stored 1365 * @macaddr: MAC address 1366 * 1367 * - Enable RTS steering for the given MAC address. This function has to be 1368 * called with lock acquired. 1369 * 1370 * NOTE: 1371 * 1. ULD has to call this function with the index value which 1372 * statisfies the following condition: 1373 * ring_num = (index % 8) 1374 * 2.ULD also needs to make sure that the index is not 1375 * occupied by any MAC address. If that index has any MAC address 1376 * it will be overwritten and HAL will not check for it. 1377 * 1378 */ 1379 xge_hal_status_e 1380 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr) 1381 { 1382 u64 val64; 1383 int section; 1384 xge_hal_status_e status; 1385 1386 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 1387 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1388 1389 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) 1390 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 1391 1392 /* 1393 * Set the MAC address at the given location marked by index. 1394 */ 1395 status = xge_hal_device_macaddr_set(hldev, index, macaddr); 1396 if (status != XGE_HAL_OK) { 1397 xge_debug_device(XGE_ERR, "%s", 1398 "Not able to set the mac addr"); 1399 return status; 1400 } 1401 1402 /* 1403 * Calculate the section value 1404 */ 1405 section = index / 32; 1406 1407 xge_debug_device(XGE_TRACE, "the Section value is %d \n", section); 1408 1409 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1410 &bar0->rts_mac_cfg); 1411 switch(section) 1412 { 1413 case 0: 1414 val64 |= XGE_HAL_RTS_MAC_SECT0_EN; 1415 break; 1416 case 1: 1417 val64 |= XGE_HAL_RTS_MAC_SECT1_EN; 1418 break; 1419 case 2: 1420 val64 |= XGE_HAL_RTS_MAC_SECT2_EN; 1421 break; 1422 case 3: 1423 val64 |= XGE_HAL_RTS_MAC_SECT3_EN; 1424 break; 1425 case 4: 1426 val64 |= XGE_HAL_RTS_MAC_SECT4_EN; 1427 break; 1428 case 5: 1429 val64 |= XGE_HAL_RTS_MAC_SECT5_EN; 1430 break; 1431 case 6: 1432 val64 |= XGE_HAL_RTS_MAC_SECT6_EN; 1433 break; 1434 case 7: 1435 val64 |= XGE_HAL_RTS_MAC_SECT7_EN; 1436 break; 1437 default: 1438 xge_debug_device(XGE_ERR, "Invalid Section value %d \n" 1439 , section); 1440 } 1441 1442 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1443 val64, &bar0->rts_mac_cfg); 1444 return XGE_HAL_OK; 1445 } 1446 1447 /* 1448 * xge__hal_device_rts_mac_disable 1449 * @hldev: HAL device handle. 1450 * @index: index number where to disable the MAC addr 1451 * 1452 * Disable RTS Steering based on the MAC address. 1453 * This function should be called with lock acquired. 
1454 * 1455 */ 1456 xge_hal_status_e 1457 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index) 1458 { 1459 xge_hal_status_e status; 1460 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 1461 1462 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 1463 1464 xge_debug_ll(XGE_TRACE, "the index value is %d \n", index); 1465 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) 1466 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 1467 1468 /* 1469 * Disable MAC address @ given index location 1470 */ 1471 status = xge_hal_device_macaddr_set(hldev, index, macaddr); 1472 if (status != XGE_HAL_OK) { 1473 xge_debug_device(XGE_ERR, "%s", 1474 "Not able to set the mac addr"); 1475 return status; 1476 } 1477 1478 return XGE_HAL_OK; 1479 } 1480 1481 1482 /* 1483 * __hal_device_rth_configure - Configure RTH for the device 1484 * @hldev: HAL device handle. 1485 * 1486 * Using IT (Indirection Table). 1487 */ 1488 xge_hal_status_e 1489 __hal_device_rth_it_configure(xge_hal_device_t *hldev) 1490 { 1491 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1492 u64 val64; 1493 int rings[XGE_HAL_MAX_RING_NUM]={0}; 1494 int rnum; 1495 int rmax; 1496 int buckets_num; 1497 int bucket; 1498 1499 if (!hldev->config.rth_en) { 1500 return XGE_HAL_OK; 1501 } 1502 1503 /* 1504 * Set the receive traffic steering mode from default(classic) 1505 * to enhanced. 1506 */ 1507 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1508 &bar0->rts_ctrl); 1509 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1510 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1511 val64, &bar0->rts_ctrl); 1512 1513 buckets_num = (1 << hldev->config.rth_bucket_size); 1514 1515 rmax=0; 1516 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { 1517 if (hldev->config.ring.queue[rnum].configured && 1518 hldev->config.ring.queue[rnum].rth_en) 1519 rings[rmax++] = rnum; 1520 } 1521 1522 rnum = 0; 1523 /* for starters: fill in all the buckets with rings "equally" */ 1524 for (bucket = 0; bucket < buckets_num; bucket++) { 1525 1526 if (rnum == rmax) 1527 rnum = 0; 1528 1529 /* write data */ 1530 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | 1531 XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]); 1532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1533 &bar0->rts_rth_map_mem_data); 1534 1535 /* execute */ 1536 val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | 1537 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | 1538 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket); 1539 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1540 &bar0->rts_rth_map_mem_ctrl); 1541 1542 /* poll until done */ 1543 if (__hal_device_register_poll(hldev, 1544 &bar0->rts_rth_map_mem_ctrl, 0, 1545 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, 1546 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1547 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1548 } 1549 1550 rnum++; 1551 } 1552 1553 val64 = XGE_HAL_RTS_RTH_EN; 1554 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size); 1555 val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN | 1556 XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN | 1557 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN; 1558 1559 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1560 &bar0->rts_rth_cfg); 1561 1562 xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d", 1563 hldev->config.rth_bucket_size); 1564 1565 return XGE_HAL_OK; 1566 } 1567 1568 1569 /* 1570 * __hal_spdm_entry_add - Add a new entry to the SPDM table. 
1571 * 1572 * Add a new entry to the SPDM table 1573 * 1574 * This function add a new entry to the SPDM table. 1575 * 1576 * Note: 1577 * This function should be called with spdm_lock. 1578 * 1579 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove. 1580 */ 1581 static xge_hal_status_e 1582 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip, 1583 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp, 1584 u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry) 1585 { 1586 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1587 u64 val64; 1588 u64 spdm_line_arr[8]; 1589 u8 line_no; 1590 1591 /* 1592 * Poll the rxpic_int_reg register until spdm ready bit is set or 1593 * timeout happens. 1594 */ 1595 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, 1596 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 1597 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1598 1599 /* upper layer may require to repeat */ 1600 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1601 } 1602 1603 /* 1604 * Clear the SPDM READY bit 1605 */ 1606 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1607 &bar0->rxpic_int_reg); 1608 val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY; 1609 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1610 &bar0->rxpic_int_reg); 1611 1612 xge_debug_device(XGE_TRACE, 1613 "L4 SP %x:DP %x: hash %x tgt_queue %d \n", 1614 l4_sp, l4_dp, jhash_value, tgt_queue); 1615 1616 xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr)); 1617 1618 /* 1619 * Construct the SPDM entry. 1620 */ 1621 spdm_line_arr[0] = vBIT(l4_sp,0,16) | 1622 vBIT(l4_dp,16,32) | 1623 vBIT(tgt_queue,53,3) | 1624 vBIT(is_tcp,59,1) | 1625 vBIT(is_ipv4,63,1); 1626 1627 1628 if (is_ipv4) { 1629 spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) | 1630 vBIT(dst_ip->ipv4.addr,32,32); 1631 1632 } else { 1633 xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8); 1634 xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8); 1635 xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8); 1636 xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8); 1637 } 1638 1639 spdm_line_arr[7] = vBIT(jhash_value,0,32) | 1640 BIT(63); /* entry enable bit */ 1641 1642 /* 1643 * Add the entry to the SPDM table 1644 */ 1645 for(line_no = 0; line_no < 8; line_no++) { 1646 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1647 spdm_line_arr[line_no], 1648 (void *)((char *)hldev->spdm_mem_base + 1649 (spdm_entry * 64) + 1650 (line_no * 8))); 1651 } 1652 1653 /* 1654 * Wait for the operation to be completed. 1655 */ 1656 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, 1657 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 1658 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1659 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1660 } 1661 1662 /* 1663 * Add this information to a local SPDM table. The purpose of 1664 * maintaining a local SPDM table is to avoid a search in the 1665 * adapter SPDM table for spdm entry lookup which is very costly 1666 * in terms of time. 
1667 */ 1668 hldev->spdm_table[spdm_entry]->in_use = 1; 1669 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip, 1670 sizeof(xge_hal_ipaddr_t)); 1671 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip, 1672 sizeof(xge_hal_ipaddr_t)); 1673 hldev->spdm_table[spdm_entry]->l4_sp = l4_sp; 1674 hldev->spdm_table[spdm_entry]->l4_dp = l4_dp; 1675 hldev->spdm_table[spdm_entry]->is_tcp = is_tcp; 1676 hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4; 1677 hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue; 1678 hldev->spdm_table[spdm_entry]->jhash_value = jhash_value; 1679 hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry; 1680 1681 return XGE_HAL_OK; 1682 } 1683 1684 /* 1685 * __hal_device_rth_spdm_configure - Configure RTH for the device 1686 * @hldev: HAL device handle. 1687 * 1688 * Using SPDM (Socket-Pair Direct Match). 1689 */ 1690 xge_hal_status_e 1691 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev) 1692 { 1693 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 1694 u64 val64; 1695 u8 spdm_bar_num; 1696 u32 spdm_bar_offset; 1697 int spdm_table_size; 1698 int i; 1699 1700 if (!hldev->config.rth_spdm_en) { 1701 return XGE_HAL_OK; 1702 } 1703 1704 /* 1705 * Retrieve the base address of SPDM Table. 1706 */ 1707 val64 = xge_os_pio_mem_read64(hldev->pdev, 1708 hldev->regh0, &bar0->spdm_bir_offset); 1709 1710 spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64); 1711 spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64); 1712 1713 1714 /* 1715 * spdm_bar_num specifies the PCI bar num register used to 1716 * address the memory space. spdm_bar_offset specifies the offset 1717 * of the SPDM memory with in the bar num memory space. 1718 */ 1719 switch (spdm_bar_num) { 1720 case 0: 1721 { 1722 hldev->spdm_mem_base = (char *)bar0 + 1723 (spdm_bar_offset * 8); 1724 break; 1725 } 1726 case 1: 1727 { 1728 char *bar1 = (char *)hldev->bar1; 1729 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8); 1730 break; 1731 } 1732 default: 1733 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1))); 1734 } 1735 1736 /* 1737 * Retrieve the size of SPDM table(number of entries). 1738 */ 1739 val64 = xge_os_pio_mem_read64(hldev->pdev, 1740 hldev->regh0, &bar0->spdm_structure); 1741 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64); 1742 1743 1744 spdm_table_size = hldev->spdm_max_entries * 1745 sizeof(xge_hal_spdm_entry_t); 1746 if (hldev->spdm_table == NULL) { 1747 void *mem; 1748 1749 /* 1750 * Allocate memory to hold the copy of SPDM table. 1751 */ 1752 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **) 1753 xge_os_malloc( 1754 hldev->pdev, 1755 (sizeof(xge_hal_spdm_entry_t *) * 1756 hldev->spdm_max_entries))) == NULL) { 1757 return XGE_HAL_ERR_OUT_OF_MEMORY; 1758 } 1759 1760 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL) 1761 { 1762 xge_os_free(hldev->pdev, hldev->spdm_table, 1763 (sizeof(xge_hal_spdm_entry_t *) * 1764 hldev->spdm_max_entries)); 1765 return XGE_HAL_ERR_OUT_OF_MEMORY; 1766 } 1767 1768 xge_os_memzero(mem, spdm_table_size); 1769 for (i = 0; i < hldev->spdm_max_entries; i++) { 1770 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *) 1771 ((char *)mem + 1772 i * sizeof(xge_hal_spdm_entry_t)); 1773 } 1774 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev); 1775 } else { 1776 /* 1777 * We are here because the host driver tries to 1778 * do a soft reset on the device. 1779 * Since the device soft reset clears the SPDM table, copy 1780 * the entries from the local SPDM table to the actual one. 
		 */
		xge_os_spin_lock(&hldev->spdm_lock);
		for (i = 0; i < hldev->spdm_max_entries; i++) {
			xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];

			if (spdm_entry->in_use) {
				if (__hal_spdm_entry_add(hldev,
						 &spdm_entry->src_ip,
						 &spdm_entry->dst_ip,
						 spdm_entry->l4_sp,
						 spdm_entry->l4_dp,
						 spdm_entry->is_tcp,
						 spdm_entry->is_ipv4,
						 spdm_entry->tgt_queue,
						 spdm_entry->jhash_value,
						 spdm_entry->spdm_entry)
						!= XGE_HAL_OK) {
					/* Log a warning */
					xge_debug_device(XGE_ERR,
						"SPDM table update from local"
						" memory failed");
				}
			}
		}
		xge_os_spin_unlock(&hldev->spdm_lock);
	}

	/*
	 * Set the receive traffic steering mode from default (classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
				hldev->regh0, &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				val64, &bar0->rts_ctrl);

	/*
	 * We may not need to configure rts_rth_jhash_cfg register as the
	 * default values are good enough to calculate the hash.
	 */

	/*
	 * As of now, set all the rth mask registers to zero. TODO.
	 */
	for(i = 0; i < 5; i++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				0, &bar0->rts_rth_hash_mask[i]);
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0, &bar0->rts_rth_hash_mask_5);

	if (hldev->config.rth_spdm_use_l4) {
		val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				val64, &bar0->rts_rth_status);
	}

	val64 = XGE_HAL_RTS_RTH_EN;
	val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rts_rth_cfg);


	return XGE_HAL_OK;
}

/*
 * __hal_device_pci_init
 * @hldev: HAL device handle.
 *
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void
__hal_device_pci_init(xge_hal_device_t *hldev)
{
	int i, pcisize = 0;
	u16 cmd = 0;
	u8 val;

	/* Set the PErr Response bit and SERR in PCI command register. */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
			xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
	cmd |= 0x140;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
			xge_offsetof(xge_hal_pci_config_le_t, command), cmd);

	/* Set the user specified value for the PCI Latency Timer */
	if (hldev->config.latency_timer &&
	    hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
		xge_os_pci_write8(hldev->pdev, hldev->cfgh,
				 xge_offsetof(xge_hal_pci_config_le_t,
				 latency_timer),
				 (u8)hldev->config.latency_timer);
	}
	/* Read back latency timer to reflect it into user level */
	xge_os_pci_read8(hldev->pdev, hldev->cfgh,
		xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
	hldev->config.latency_timer = val;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
		xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd |= 1;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
		 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);

	/* Set MMRB count in PCI-X command register.
*/ 1891 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) { 1892 cmd &= 0xFFF3; 1893 cmd |= hldev->config.mmrb_count << 2; 1894 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 1895 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 1896 cmd); 1897 } 1898 /* Read back MMRB count to reflect it into user level */ 1899 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 1900 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 1901 &cmd); 1902 cmd &= 0x000C; 1903 hldev->config.mmrb_count = cmd>>2; 1904 1905 /* Setting Maximum outstanding splits based on system type. */ 1906 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) { 1907 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 1908 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 1909 &cmd); 1910 cmd &= 0xFF8F; 1911 cmd |= hldev->config.max_splits_trans << 4; 1912 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 1913 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 1914 cmd); 1915 } 1916 1917 /* Read back max split trans to reflect it into user level */ 1918 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 1919 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 1920 cmd &= 0x0070; 1921 hldev->config.max_splits_trans = cmd>>4; 1922 1923 /* Forcibly disabling relaxed ordering capability of the card. */ 1924 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 1925 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 1926 cmd &= 0xFFFD; 1927 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 1928 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); 1929 1930 /* Store PCI device ID and revision for future references where in we 1931 * decide Xena revision using PCI sub system ID */ 1932 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 1933 xge_offsetof(xge_hal_pci_config_le_t, device_id), 1934 &hldev->device_id); 1935 xge_os_pci_read8(hldev->pdev,hldev->cfgh, 1936 xge_offsetof(xge_hal_pci_config_le_t, revision), 1937 &hldev->revision); 1938 1939 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 1940 pcisize = XGE_HAL_PCISIZE_HERC; 1941 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 1942 pcisize = XGE_HAL_PCISIZE_XENA; 1943 1944 /* save PCI config space for future resets */ 1945 for (i = 0; i < pcisize; i++) { 1946 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, 1947 (u32*)&hldev->pci_config_space + i); 1948 } 1949 1950 #if defined(XGE_HAL_MSI) 1951 /* Upper limit of the MSI number enabled by the system */ 1952 xge_os_pci_read32(hldev->pdev, hldev->cfgh, 1953 xge_offsetof(xge_hal_pci_config_le_t, msi_control), 1954 &hldev->msi_mask); 1955 hldev->msi_mask &= 0x70; 1956 if (!hldev->msi_mask) 1957 return; 1958 hldev->msi_mask >>= 4; /* 1959 * This number's power of 2 is the number 1960 * of MSIs enabled. 1961 */ 1962 hldev->msi_mask = (0x1 << hldev->msi_mask); 1963 /* 1964 * NOTE: 1965 * If 32 MSIs are enabled, then MSI numbers range from 0 - 31. 1966 */ 1967 hldev->msi_mask -= 1; 1968 #endif 1969 } 1970 1971 /* 1972 * __hal_device_pci_info_get - Get PCI bus informations such as width, frequency 1973 * and mode. 1974 * @devh: HAL device handle. 1975 * @pci_mode: pointer to a variable of enumerated type 1976 * xge_hal_pci_mode_e{}. 1977 * @bus_frequency: pointer to a variable of enumerated type 1978 * xge_hal_pci_bus_frequency_e{}. 1979 * @bus_width: pointer to a variable of enumerated type 1980 * xge_hal_pci_bus_width_e{}. 1981 * 1982 * Get pci mode, frequency, and PCI bus width. 1983 * 1984 * Returns: one of the xge_hal_status_e{} enumerated types. 1985 * XGE_HAL_OK - for success. 
1986 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card. 1987 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card. 1988 * 1989 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 1990 */ 1991 static xge_hal_status_e 1992 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 1993 xge_hal_pci_bus_frequency_e *bus_frequency, 1994 xge_hal_pci_bus_width_e *bus_width) 1995 { 1996 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 1997 xge_hal_status_e rc_status = XGE_HAL_OK; 1998 xge_hal_card_e card_id = xge_hal_device_check_id (devh); 1999 2000 #ifdef XGE_HAL_HERC_EMULATION 2001 hldev->config.pci_freq_mherz = 2002 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2003 *bus_frequency = 2004 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2005 *pci_mode = XGE_HAL_PCI_66MHZ_MODE; 2006 #else 2007 if (card_id == XGE_HAL_CARD_HERC) { 2008 xge_hal_pci_bar0_t *bar0 = 2009 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2010 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2011 &bar0->pci_info); 2012 if (XGE_HAL_PCI_32_BIT & pci_info) 2013 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT; 2014 else 2015 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; 2016 switch((pci_info & XGE_HAL_PCI_INFO)>>60) 2017 { 2018 case XGE_HAL_PCI_33MHZ_MODE: 2019 *bus_frequency = 2020 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ; 2021 *pci_mode = XGE_HAL_PCI_33MHZ_MODE; 2022 break; 2023 case XGE_HAL_PCI_66MHZ_MODE: 2024 *bus_frequency = 2025 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2026 *pci_mode = XGE_HAL_PCI_66MHZ_MODE; 2027 break; 2028 case XGE_HAL_PCIX_M1_66MHZ_MODE: 2029 *bus_frequency = 2030 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2031 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE; 2032 break; 2033 case XGE_HAL_PCIX_M1_100MHZ_MODE: 2034 *bus_frequency = 2035 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ; 2036 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE; 2037 break; 2038 case XGE_HAL_PCIX_M1_133MHZ_MODE: 2039 *bus_frequency = 2040 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2041 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE; 2042 break; 2043 case XGE_HAL_PCIX_M2_66MHZ_MODE: 2044 *bus_frequency = 2045 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2046 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE; 2047 break; 2048 case XGE_HAL_PCIX_M2_100MHZ_MODE: 2049 *bus_frequency = 2050 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ; 2051 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE; 2052 break; 2053 case XGE_HAL_PCIX_M2_133MHZ_MODE: 2054 *bus_frequency = 2055 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ; 2056 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE; 2057 break; 2058 case XGE_HAL_PCIX_M1_RESERVED: 2059 case XGE_HAL_PCIX_M1_66MHZ_NS: 2060 case XGE_HAL_PCIX_M1_100MHZ_NS: 2061 case XGE_HAL_PCIX_M1_133MHZ_NS: 2062 case XGE_HAL_PCIX_M2_RESERVED: 2063 case XGE_HAL_PCIX_533_RESERVED: 2064 default: 2065 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO; 2066 xge_debug_device(XGE_ERR, 2067 "invalid pci info %llx", 2068 (unsigned long long)pci_info); 2069 break; 2070 } 2071 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO) 2072 xge_debug_device(XGE_TRACE, "PCI info: mode %d width " 2073 "%d frequency %d", *pci_mode, *bus_width, 2074 *bus_frequency); 2075 2076 hldev->config.pci_freq_mherz = *bus_frequency; 2077 } 2078 /* for XENA, we report PCI mode, only. 
PCI bus frequency, and bus width 2079 * are set to unknown */ 2080 else if (card_id == XGE_HAL_CARD_XENA) { 2081 u32 pcix_status; 2082 u8 dev_num, bus_num; 2083 /* initialize defaults for XENA */ 2084 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; 2085 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; 2086 xge_os_pci_read32(hldev->pdev, hldev->cfgh, 2087 xge_offsetof(xge_hal_pci_config_le_t, pcix_status), 2088 &pcix_status); 2089 dev_num = (u8)((pcix_status & 0xF8) >> 3); 2090 bus_num = (u8)((pcix_status & 0xFF00) >> 8); 2091 if (dev_num == 0 && bus_num == 0) 2092 *pci_mode = XGE_HAL_PCI_BASIC_MODE; 2093 else 2094 *pci_mode = XGE_HAL_PCIX_BASIC_MODE; 2095 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode); 2096 if (hldev->config.pci_freq_mherz == 2097 XGE_HAL_DEFAULT_USE_HARDCODE) { 2098 /* 2099 * There is no way to detect BUS frequency on Xena, 2100 * so, in case of automatic configuration we hopelessly 2101 * assume 133MHZ. 2102 */ 2103 hldev->config.pci_freq_mherz = 2104 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2105 } 2106 } else{ 2107 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID; 2108 xge_debug_device(XGE_ERR, "invalid device id %d", card_id); 2109 } 2110 #endif 2111 2112 return rc_status; 2113 } 2114 2115 /* 2116 * __hal_device_handle_link_up_ind 2117 * @hldev: HAL device handle. 2118 * 2119 * Link up indication handler. The function is invoked by HAL when 2120 * Xframe indicates that the link is up for programmable amount of time. 2121 */ 2122 static int 2123 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev) 2124 { 2125 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2126 u64 val64; 2127 2128 /* 2129 * If the previous link state is not down, return. 2130 */ 2131 if (hldev->link_state == XGE_HAL_LINK_UP) { 2132 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2133 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ 2134 val64 = xge_os_pio_mem_read64( 2135 hldev->pdev, hldev->regh0, 2136 &bar0->misc_int_mask); 2137 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2138 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2139 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2140 val64, &bar0->misc_int_mask); 2141 } 2142 #endif 2143 xge_debug_device(XGE_TRACE, 2144 "link up indication while link is up, ignoring.."); 2145 return 0; 2146 } 2147 2148 /* Now re-enable it as due to noise, hardware turned it off */ 2149 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2150 &bar0->adapter_control); 2151 val64 |= XGE_HAL_ADAPTER_CNTL_EN; 2152 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ 2153 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2154 &bar0->adapter_control); 2155 2156 /* Turn on the Laser */ 2157 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2158 &bar0->adapter_control); 2159 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON | 2160 XGE_HAL_ADAPTER_LED_ON); 2161 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2162 &bar0->adapter_control); 2163 2164 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2165 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2166 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2167 &bar0->adapter_status); 2168 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2169 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) { 2170 xge_debug_device(XGE_TRACE, "%s", 2171 "fail to transition link to up..."); 2172 return 0; 2173 } 2174 else { 2175 /* 2176 * Mask the Link Up interrupt and unmask the Link Down 2177 * interrupt. 
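 * (The link is already up at this point, so only a subsequent link-down
 * transition is of interest; masking the up interrupt presumably avoids
 * a storm of redundant link-up indications on a noisy link.)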
2178 */ 2179 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2180 &bar0->misc_int_mask); 2181 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2182 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2183 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2184 &bar0->misc_int_mask); 2185 xge_debug_device(XGE_TRACE, "calling link up.."); 2186 hldev->link_state = XGE_HAL_LINK_UP; 2187 2188 /* notify ULD */ 2189 if (g_xge_hal_driver->uld_callbacks.link_up) { 2190 g_xge_hal_driver->uld_callbacks.link_up( 2191 hldev->upper_layer_info); 2192 } 2193 return 1; 2194 } 2195 } 2196 #endif 2197 xge_os_mdelay(1); 2198 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, 2199 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2200 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), 2201 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { 2202 2203 /* notify ULD */ 2204 (void) xge_queue_produce_context(hldev->queueh, 2205 XGE_HAL_EVENT_LINK_IS_UP, 2206 hldev); 2207 /* link is up after been enabled */ 2208 return 1; 2209 } else { 2210 xge_debug_device(XGE_TRACE, "%s", 2211 "fail to transition link to up..."); 2212 return 0; 2213 } 2214 } 2215 2216 /* 2217 * __hal_device_handle_link_down_ind 2218 * @hldev: HAL device handle. 2219 * 2220 * Link down indication handler. The function is invoked by HAL when 2221 * Xframe indicates that the link is down. 2222 */ 2223 static int 2224 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev) 2225 { 2226 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2227 u64 val64; 2228 2229 /* 2230 * If the previous link state is not up, return. 2231 */ 2232 if (hldev->link_state == XGE_HAL_LINK_DOWN) { 2233 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2234 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ 2235 val64 = xge_os_pio_mem_read64( 2236 hldev->pdev, hldev->regh0, 2237 &bar0->misc_int_mask); 2238 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2239 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2240 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2241 val64, &bar0->misc_int_mask); 2242 } 2243 #endif 2244 xge_debug_device(XGE_TRACE, 2245 "link down indication while link is down, ignoring.."); 2246 return 0; 2247 } 2248 xge_os_mdelay(1); 2249 2250 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2251 &bar0->adapter_control); 2252 2253 /* try to debounce the link only if the adapter is enabled. 
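 * That is, poll adapter_status for up to
 * XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS; if the RMAC local/remote fault
 * bits clear within that window, the link is considered still up and the
 * down indication is treated as noise and ignored (see the check below).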
*/ 2254 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) { 2255 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, 2256 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2257 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), 2258 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { 2259 xge_debug_device(XGE_TRACE, 2260 "link is actually up (possible noisy link?), ignoring."); 2261 return(0); 2262 } 2263 } 2264 2265 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2266 &bar0->adapter_control); 2267 /* turn off LED */ 2268 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 2269 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2270 &bar0->adapter_control); 2271 2272 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2273 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2274 /* 2275 * Mask the Link Down interrupt and unmask the Link up 2276 * interrupt 2277 */ 2278 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2279 &bar0->misc_int_mask); 2280 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2281 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2282 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2283 &bar0->misc_int_mask); 2284 2285 /* link is down */ 2286 xge_debug_device(XGE_TRACE, "calling link down.."); 2287 hldev->link_state = XGE_HAL_LINK_DOWN; 2288 2289 /* notify ULD */ 2290 if (g_xge_hal_driver->uld_callbacks.link_down) { 2291 g_xge_hal_driver->uld_callbacks.link_down( 2292 hldev->upper_layer_info); 2293 } 2294 return 1; 2295 } 2296 #endif 2297 /* notify ULD */ 2298 (void) xge_queue_produce_context(hldev->queueh, 2299 XGE_HAL_EVENT_LINK_IS_DOWN, 2300 hldev); 2301 /* link is down */ 2302 return 1; 2303 } 2304 /* 2305 * __hal_device_handle_link_state_change 2306 * @hldev: HAL device handle. 2307 * 2308 * Link state change handler. The function is invoked by HAL when 2309 * Xframe indicates link state change condition. The code here makes sure to 2310 * 1) ignore redundant state change indications; 2311 * 2) execute link-up sequence, and handle the failure to bring the link up; 2312 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by 2313 * upper-layer driver (ULD). 2314 */ 2315 static int 2316 __hal_device_handle_link_state_change(xge_hal_device_t *hldev) 2317 { 2318 u64 hw_status; 2319 int hw_link_state; 2320 int retcode; 2321 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2322 u64 val64; 2323 2324 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2325 &bar0->adapter_control); 2326 2327 /* If the adapter is not enabled but the hal thinks we are in the up 2328 * state then transition to the down state. 2329 */ 2330 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) && 2331 (hldev->link_state == XGE_HAL_LINK_UP) ) { 2332 return(__hal_device_handle_link_down_ind(hldev)); 2333 } 2334 2335 (void) xge_hal_device_status(hldev, &hw_status); 2336 hw_link_state = (hw_status & 2337 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2338 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ? 
2339 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP; 2340 2341 /* If the current link state is same as previous, just return */ 2342 if (hldev->link_state == hw_link_state) 2343 retcode = 0; 2344 /* detected state change */ 2345 else if (hw_link_state == XGE_HAL_LINK_UP) 2346 retcode = __hal_device_handle_link_up_ind(hldev); 2347 else 2348 retcode = __hal_device_handle_link_down_ind(hldev); 2349 return retcode; 2350 } 2351 2352 /* 2353 * 2354 */ 2355 static void 2356 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value) 2357 { 2358 hldev->stats.sw_dev_err_stats.serr_cnt++; 2359 if (hldev->config.dump_on_serr) { 2360 #ifdef XGE_HAL_USE_MGMT_AUX 2361 (void) xge_hal_aux_device_dump(hldev); 2362 #endif 2363 } 2364 2365 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev, 2366 1, sizeof(u64), (void *)&value); 2367 2368 xge_debug_device(XGE_ERR, "%s: read %llx", reg, 2369 (unsigned long long) value); 2370 } 2371 2372 /* 2373 * 2374 */ 2375 static void 2376 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value) 2377 { 2378 if (hldev->config.dump_on_eccerr) { 2379 #ifdef XGE_HAL_USE_MGMT_AUX 2380 (void) xge_hal_aux_device_dump(hldev); 2381 #endif 2382 } 2383 2384 /* Herc smart enough to recover on its own! */ 2385 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 2386 (void) xge_queue_produce(hldev->queueh, 2387 XGE_HAL_EVENT_ECCERR, hldev, 2388 1, sizeof(u64), (void *)&value); 2389 } 2390 2391 xge_debug_device(XGE_ERR, "%s: read %llx", reg, 2392 (unsigned long long) value); 2393 } 2394 2395 /* 2396 * 2397 */ 2398 static void 2399 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value) 2400 { 2401 if (hldev->config.dump_on_parityerr) { 2402 #ifdef XGE_HAL_USE_MGMT_AUX 2403 (void) xge_hal_aux_device_dump(hldev); 2404 #endif 2405 } 2406 (void) xge_queue_produce_context(hldev->queueh, 2407 XGE_HAL_EVENT_PARITYERR, hldev); 2408 2409 xge_debug_device(XGE_ERR, "%s: read %llx", reg, 2410 (unsigned long long) value); 2411 } 2412 2413 /* 2414 * 2415 */ 2416 static void 2417 __hal_device_handle_targetabort(xge_hal_device_t *hldev) 2418 { 2419 (void) xge_queue_produce_context(hldev->queueh, 2420 XGE_HAL_EVENT_TARGETABORT, hldev); 2421 } 2422 2423 2424 /* 2425 * __hal_device_hw_initialize 2426 * @hldev: HAL device handle. 2427 * 2428 * Initialize Xframe hardware. 2429 */ 2430 static xge_hal_status_e 2431 __hal_device_hw_initialize(xge_hal_device_t *hldev) 2432 { 2433 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2434 xge_hal_status_e status; 2435 u64 val64; 2436 2437 /* Set proper endian settings and verify the same by reading the PIF 2438 * Feed-back register. */ 2439 status = __hal_device_set_swapper(hldev); 2440 if (status != XGE_HAL_OK) { 2441 return status; 2442 } 2443 2444 /* update the pci mode, frequency, and width */ 2445 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode, 2446 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){ 2447 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE; 2448 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; 2449 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; 2450 /* 2451 * FIXME: this cannot happen. 
2452 * But if it happens we cannot continue just like that 2453 */ 2454 xge_debug_device(XGE_ERR, "unable to get pci info"); 2455 } 2456 2457 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) || 2458 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) || 2459 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) { 2460 /* PCI optimization: set TxReqTimeOut 2461 * register (0x800+0x120) to 0x1ff or 2462 * something close to this. 2463 * Note: not to be used for PCI-X! */ 2464 2465 val64 = XGE_HAL_TXREQTO_VAL(0x1FF); 2466 val64 |= XGE_HAL_TXREQTO_EN; 2467 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2468 &bar0->txreqtimeout); 2469 2470 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2471 &bar0->read_retry_delay); 2472 2473 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2474 &bar0->write_retry_delay); 2475 2476 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode"); 2477 } 2478 2479 /* added this to clear the EOI_RESET field while leaving XGXS_RESET 2480 * in reset, then a 1-second delay */ 2481 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2482 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset); 2483 xge_os_mdelay(1000); 2484 2485 /* Clear the XGXS_RESET field of the SW_RESET register in order to 2486 * release the XGXS from reset. Its reset value is 0xA5; write 0x00 2487 * to activate the XGXS. The core requires a minimum 500 us reset.*/ 2488 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset); 2489 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2490 &bar0->sw_reset); 2491 xge_os_mdelay(1); 2492 2493 /* read registers in all blocks */ 2494 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2495 &bar0->mac_int_mask); 2496 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2497 &bar0->mc_int_mask); 2498 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2499 &bar0->xgxs_int_mask); 2500 2501 /* set default MTU and steer based on length*/ 2502 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Alway set 22 bytes extra for steering to work 2503 2504 if (hldev->config.mac.rmac_bcast_en) { 2505 xge_hal_device_bcast_enable(hldev); 2506 } else { 2507 xge_hal_device_bcast_disable(hldev); 2508 } 2509 2510 #ifndef XGE_HAL_HERC_EMULATION 2511 __hal_device_xaui_configure(hldev); 2512 #endif 2513 __hal_device_mac_link_util_set(hldev); 2514 2515 __hal_device_mac_link_util_set(hldev); 2516 2517 /* 2518 * Keep its PCI REQ# line asserted during a write 2519 * transaction up to the end of the transaction 2520 */ 2521 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2522 &bar0->misc_control); 2523 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN; 2524 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2525 val64, &bar0->misc_control); 2526 2527 /* 2528 * Initialize the device tti registers only if the TTI feature is 2529 * enabled. 
2530 */ 2531 if (hldev->config.tti.enabled) { 2532 if ((status = __hal_device_tti_configure(hldev)) != 2533 XGE_HAL_OK) { 2534 return status; 2535 } 2536 } 2537 2538 status = __hal_device_rti_configure(hldev, 0); 2539 if (status != XGE_HAL_OK) { 2540 return status; 2541 } 2542 status = __hal_device_rth_it_configure(hldev); 2543 if (status != XGE_HAL_OK) { 2544 return status; 2545 } 2546 status = __hal_device_rth_spdm_configure(hldev); 2547 if (status != XGE_HAL_OK) { 2548 return status; 2549 } 2550 status = __hal_device_rts_mac_configure(hldev); 2551 if (status != XGE_HAL_OK) { 2552 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed \n"); 2553 return status; 2554 } 2555 2556 __hal_device_pause_frames_configure(hldev); 2557 __hal_device_rmac_padding_configure(hldev); 2558 __hal_device_shared_splits_configure(hldev); 2559 2560 /* make sure all interrupts going to be disabled at the moment */ 2561 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); 2562 2563 /* SXE-008 Transmit DMA arbitration issue */ 2564 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 2565 hldev->revision < 4) { 2566 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0, 2567 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR, 2568 &bar0->pcc_enable); 2569 } 2570 __hal_fifo_hw_initialize(hldev); 2571 __hal_ring_hw_initialize(hldev); 2572 2573 if (__hal_device_wait_quiescent(hldev, &val64)) { 2574 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2575 } 2576 2577 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, 2578 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, 2579 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 2580 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); 2581 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2582 } 2583 2584 xge_debug_device(XGE_TRACE, "device 0x%llx is quiescent", 2585 (unsigned long long)(ulong_t)hldev); 2586 2587 #if defined(XGE_HAL_MSI) 2588 /* 2589 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL 2590 * is disabled. 2591 */ 2592 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2593 &bar0->pic_control); 2594 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT); 2595 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2596 &bar0->pic_control); 2597 #endif 2598 2599 hldev->hw_is_initialized = 1; 2600 hldev->terminating = 0; 2601 return XGE_HAL_OK; 2602 } 2603 2604 /* 2605 * __hal_device_reset - Reset device only. 2606 * @hldev: HAL device handle. 2607 * 2608 * Reset the device, and subsequently restore 2609 * the previously saved PCI configuration space. 
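 *
 * Rough sequence, as implemented below: save the MSI-X vector table from
 * BAR2 (Herc only), issue the software reset through the sw_reset
 * register, wait for the adapter to come out of reset, then write back
 * the saved PCI configuration space (which also restores bus mastership)
 * and the MSI-X table. The read-back of sw_reset is compared against the
 * expected raw value to confirm that the reset actually took place.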
 */
static xge_hal_status_e
__hal_device_reset(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    int i, swap_done, pcisize = 0;
    u64 val64, rawval = 0ULL;

#if defined(XGE_HAL_MSI_X)
    /* Save MSI-X vector table */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        if (hldev->bar2) {
            u64 *msix_vector_table = (u64 *)hldev->bar2;

            // 2 64bit words for each entry
            for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; i++) {
                hldev->msix_vector_table[i] =
                    xge_os_pio_mem_read64(hldev->pdev,
                    hldev->regh2, &msix_vector_table[i]);
            }
        }
    }

#endif

    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->pif_rd_swapper_fb);
    swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);

    if (swap_done) {
        __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
            (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
    } else {
        u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
#if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
        /* swap it */
        val = (((val & (u32)0x000000ffUL) << 24) |
            ((val & (u32)0x0000ff00UL) << 8) |
            ((val & (u32)0x00ff0000UL) >> 8) |
            ((val & (u32)0xff000000UL) >> 24));
#endif
        xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
            &bar0->sw_reset);
    }

    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        int cnt = 0;

        rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
        pcisize = XGE_HAL_PCISIZE_HERC;
        xge_os_mdelay(1);
        do {
            val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &bar0->sw_reset);
            if (val64 != rawval) {
                break;
            }
            cnt++;
            xge_os_mdelay(1); /* Wait for 1ms before retry */
        } while (cnt < 20);
    } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
        rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
        pcisize = XGE_HAL_PCISIZE_XENA;
        xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
    }

    for (i = 0; i < pcisize; i++) {
        xge_os_pci_write32(hldev->pdev, hldev->cfgh, i * 4,
            *((u32*)&hldev->pci_config_space + i));
    }

#if defined(XGE_HAL_MSI_X)
    /* Restore MSI-X vector table */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        if (hldev->bar2) {
            /*
             * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 )
             * 98: PBATable  00000404 ( BIR:4 Offset:0x400 )
             */
            u64 *msix_vector_table = (u64 *)hldev->bar2;

            //xge_os_pci_read16(hldev->pdev, hldev->cfgh,
            //xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);

            // 2 64bit words for each entry
            for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; i++) {
                xge_os_pio_mem_write64(hldev->pdev, hldev->regh2,
                    hldev->msix_vector_table[i], &msix_vector_table[i]);
            }
        }
    }

#endif

    hldev->link_state = XGE_HAL_LINK_DOWN;
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->sw_reset);

    if (val64 != rawval) {
        xge_debug_device(XGE_ERR, "device has not been reset "
            "got 0x%llx, expected 0x%llx",
            (unsigned long long)val64, (unsigned long long)rawval);
        return XGE_HAL_ERR_RESET_FAILED;
    }

    hldev->hw_is_initialized = 0;
    return XGE_HAL_OK;
}

/*
 * __hal_device_poll - General private routine to poll the device.
 * @hldev: HAL device handle.
2721 * 2722 * Returns: one of the xge_hal_status_e{} enumerated types. 2723 * XGE_HAL_OK - for success. 2724 * XGE_HAL_ERR_CRITICAL - when encounters critical error. 2725 */ 2726 static xge_hal_status_e 2727 __hal_device_poll(xge_hal_device_t *hldev) 2728 { 2729 xge_hal_pci_bar0_t *bar0; 2730 u64 err_reg; 2731 2732 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2733 2734 /* Handling SERR errors by forcing a H/W reset. */ 2735 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2736 &bar0->serr_source); 2737 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) { 2738 __hal_device_handle_serr(hldev, "serr_source", err_reg); 2739 return XGE_HAL_ERR_CRITICAL; 2740 } 2741 2742 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2743 &bar0->misc_int_reg); 2744 2745 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) { 2746 hldev->stats.sw_dev_err_stats.parity_err_cnt++; 2747 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg); 2748 return XGE_HAL_ERR_CRITICAL; 2749 } 2750 2751 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2752 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 2753 #endif 2754 { 2755 2756 /* Handling link status change error Intr */ 2757 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2758 &bar0->mac_rmac_err_reg); 2759 if (__hal_device_handle_link_state_change(hldev)) 2760 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2761 err_reg, &bar0->mac_rmac_err_reg); 2762 } 2763 2764 if (hldev->inject_serr != 0) { 2765 err_reg = hldev->inject_serr; 2766 hldev->inject_serr = 0; 2767 __hal_device_handle_serr(hldev, "inject_serr", err_reg); 2768 return XGE_HAL_ERR_CRITICAL; 2769 } 2770 2771 if (hldev->inject_ecc != 0) { 2772 err_reg = hldev->inject_ecc; 2773 hldev->inject_ecc = 0; 2774 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 2775 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg); 2776 return XGE_HAL_ERR_CRITICAL; 2777 } 2778 2779 if (hldev->inject_bad_tcode != 0) { 2780 u8 t_code = hldev->inject_bad_tcode; 2781 xge_hal_channel_t channel; 2782 xge_hal_fifo_txd_t txd; 2783 xge_hal_ring_rxd_1_t rxd; 2784 2785 channel.devh = hldev; 2786 2787 if (hldev->inject_bad_tcode_for_chan_type == 2788 XGE_HAL_CHANNEL_TYPE_FIFO) { 2789 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO; 2790 2791 } else { 2792 channel.type = XGE_HAL_CHANNEL_TYPE_RING; 2793 } 2794 2795 hldev->inject_bad_tcode = 0; 2796 2797 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO) 2798 return xge_hal_device_handle_tcode(&channel, &txd, 2799 t_code); 2800 else 2801 return xge_hal_device_handle_tcode(&channel, &rxd, 2802 t_code); 2803 } 2804 2805 return XGE_HAL_OK; 2806 } 2807 2808 /* 2809 * __hal_verify_pcc_idle - Verify All Enbled PCC are IDLE or not 2810 * @hldev: HAL device handle. 2811 * @adp_status: Adapter Status value 2812 * Usage: See xge_hal_device_enable{}. 
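 * Returns: XGE_HAL_OK if the PCCs relevant for this hardware revision
 * report IDLE in @adp_status; XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT
 * otherwise.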
2813 */ 2814 xge_hal_status_e 2815 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status) 2816 { 2817 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 2818 hldev->revision < 4) { 2819 /* 2820 * For Xena 1,2,3 we enable only 4 PCCs Due to 2821 * SXE-008 (Transmit DMA arbitration issue) 2822 */ 2823 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) 2824 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) { 2825 xge_debug_device(XGE_TRACE, "%s", 2826 "PCC is not IDLE after adapter enabled!"); 2827 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2828 } 2829 } else { 2830 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) != 2831 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) { 2832 xge_debug_device(XGE_TRACE, "%s", 2833 "PCC is not IDLE after adapter enabled!"); 2834 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2835 } 2836 } 2837 return XGE_HAL_OK; 2838 } 2839 2840 static void 2841 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no) 2842 { 2843 int ufc = hldev->config.ring.queue[ring_no].rti.ufc_a; 2844 int ic = hldev->stats.sw_dev_info_stats.total_intr_cnt - 2845 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt; 2846 int i; 2847 2848 /* urange_a adaptive coalescing */ 2849 if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) { 2850 if (ic > hldev->rxufca_intr_thres) { 2851 if (ufc < hldev->config.rxufca_hi_lim) { 2852 ufc += 1; 2853 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) 2854 hldev->config.ring.queue[i].rti.ufc_a = ufc; 2855 (void) __hal_device_rti_configure(hldev, 1); 2856 hldev->stats.sw_dev_info_stats. 2857 rxufca_hi_adjust_cnt++; 2858 } 2859 hldev->rxufca_intr_thres = ic + 2860 hldev->config.rxufca_intr_thres; /* def: 30 */ 2861 } else { 2862 if (ufc > hldev->config.rxufca_lo_lim) { 2863 ufc -= 1; 2864 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) 2865 hldev->config.ring.queue[i].rti.ufc_a = ufc; 2866 (void) __hal_device_rti_configure(hldev, 1); 2867 hldev->stats.sw_dev_info_stats. 2868 rxufca_lo_adjust_cnt++; 2869 } 2870 } 2871 hldev->rxufca_lbolt_time = hldev->rxufca_lbolt + 2872 hldev->config.rxufca_lbolt_period; 2873 } 2874 hldev->rxufca_lbolt++; 2875 } 2876 2877 /* 2878 * __hal_device_handle_mc - Handle MC interrupt reason 2879 * @hldev: HAL device handle. 
2880 * @reason: interrupt reason 2881 */ 2882 xge_hal_status_e 2883 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason) 2884 { 2885 xge_hal_pci_bar0_t *isrbar0 = 2886 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 2887 u64 val64; 2888 2889 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2890 &isrbar0->mc_int_status); 2891 if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT)) 2892 return XGE_HAL_OK; 2893 2894 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2895 &isrbar0->mc_err_reg); 2896 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2897 val64, &isrbar0->mc_err_reg); 2898 2899 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L || 2900 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U || 2901 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 || 2902 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 || 2903 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA && 2904 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L || 2905 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U || 2906 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L || 2907 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) { 2908 hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++; 2909 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 2910 } 2911 2912 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L || 2913 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U || 2914 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 || 2915 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 || 2916 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA && 2917 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L || 2918 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U || 2919 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L || 2920 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) { 2921 hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++; 2922 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 2923 } 2924 2925 if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) { 2926 hldev->stats.sw_dev_err_stats.sm_err_cnt++; 2927 } 2928 2929 /* those two should result in device reset */ 2930 if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 || 2931 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) { 2932 __hal_device_handle_eccerr(hldev, "mc_err_reg", val64); 2933 return XGE_HAL_ERR_CRITICAL; 2934 } 2935 2936 return XGE_HAL_OK; 2937 } 2938 2939 /* 2940 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason 2941 * @hldev: HAL device handle. 
2942 * @reason: interrupt reason 2943 */ 2944 xge_hal_status_e 2945 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason) 2946 { 2947 xge_hal_pci_bar0_t *isrbar0 = 2948 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 2949 u64 val64; 2950 2951 if (reason & XGE_HAL_PIC_INT_FLSH) { 2952 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2953 &isrbar0->flsh_int_reg); 2954 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2955 val64, &isrbar0->flsh_int_reg); 2956 /* FIXME: handle register */ 2957 } 2958 if (reason & XGE_HAL_PIC_INT_MDIO) { 2959 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2960 &isrbar0->mdio_int_reg); 2961 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2962 val64, &isrbar0->mdio_int_reg); 2963 /* FIXME: handle register */ 2964 } 2965 if (reason & XGE_HAL_PIC_INT_IIC) { 2966 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2967 &isrbar0->iic_int_reg); 2968 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2969 val64, &isrbar0->iic_int_reg); 2970 /* FIXME: handle register */ 2971 } 2972 if (reason & XGE_HAL_PIC_INT_MISC) { 2973 val64 = xge_os_pio_mem_read64(hldev->pdev, 2974 hldev->regh0, &isrbar0->misc_int_reg); 2975 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2976 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2977 /* Check for Link interrupts. If both Link Up/Down 2978 * bits are set, clear both and check adapter status 2979 */ 2980 if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) && 2981 (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) { 2982 u64 temp64; 2983 2984 xge_debug_device(XGE_TRACE, 2985 "both link up and link down detected %llx", 2986 (unsigned long long)val64); 2987 2988 temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT | 2989 XGE_HAL_MISC_INT_REG_LINK_UP_INT); 2990 xge_os_pio_mem_write64(hldev->pdev, 2991 hldev->regh0, temp64, 2992 &isrbar0->misc_int_reg); 2993 } 2994 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) { 2995 xge_debug_device(XGE_TRACE, 2996 "link up call request, misc_int %llx", 2997 (unsigned long long)val64); 2998 __hal_device_handle_link_up_ind(hldev); 2999 } 3000 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){ 3001 xge_debug_device(XGE_TRACE, 3002 "link down request, misc_int %llx", 3003 (unsigned long long)val64); 3004 __hal_device_handle_link_down_ind(hldev); 3005 } 3006 } else 3007 #endif 3008 { 3009 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3010 val64, &isrbar0->misc_int_reg); 3011 } 3012 } 3013 3014 return XGE_HAL_OK; 3015 } 3016 3017 /* 3018 * __hal_device_handle_txpic - Handle TxPIC interrupt reason 3019 * @hldev: HAL device handle. 
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
{
    xge_hal_status_e status = XGE_HAL_OK;
    xge_hal_pci_bar0_t *isrbar0 =
        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
    volatile u64 val64;

    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &isrbar0->pic_int_status);
    if (val64 & (XGE_HAL_PIC_INT_FLSH |
        XGE_HAL_PIC_INT_MDIO |
        XGE_HAL_PIC_INT_IIC |
        XGE_HAL_PIC_INT_MISC)) {
        status = __hal_device_handle_pic(hldev, val64);
        xge_os_wmb();
    }

    if (!(val64 & XGE_HAL_PIC_INT_TX))
        return status;

    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &isrbar0->txpic_int_reg);
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
        val64, &isrbar0->txpic_int_reg);
    xge_os_wmb();

    if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
        int i;

        if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
            g_xge_hal_driver->uld_callbacks.sched_timer(
                hldev, hldev->upper_layer_info);
        /*
         * This feature implements adaptive receive interrupt
         * coalescing. It is disabled by default. To enable it,
         * set hldev->config.rxufca_lo_lim to a value different
         * from hldev->config.rxufca_hi_lim.
         *
         * The HW scheduling timer drives this feature, so the
         * user needs to configure hldev->config.rxufca_lbolt_period,
         * which is essentially the time slice of that timer.
         *
         * For those familiar with Linux: lbolt is this timer's
         * equivalent of jiffies, i.e. the timer tick.
         */
        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
            if (hldev->config.ring.queue[i].rti.urange_a) {
                if (hldev->config.rxufca_lo_lim !=
                    hldev->config.rxufca_hi_lim)
                    __hal_update_rxufca(hldev, i);
            }
        }
    }

    return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
 * @hldev: HAL device handle.
3083 * @reason: interrupt reason 3084 */ 3085 xge_hal_status_e 3086 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason) 3087 { 3088 xge_hal_pci_bar0_t *isrbar0 = 3089 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3090 u64 val64, err; 3091 3092 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3093 &isrbar0->txdma_int_status); 3094 if (val64 & XGE_HAL_TXDMA_PFC_INT) { 3095 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3096 &isrbar0->pfc_err_reg); 3097 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3098 err, &isrbar0->pfc_err_reg); 3099 /* FIXME: handle register */ 3100 } 3101 if (val64 & XGE_HAL_TXDMA_TDA_INT) { 3102 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3103 &isrbar0->tda_err_reg); 3104 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3105 err, &isrbar0->tda_err_reg); 3106 /* FIXME: handle register */ 3107 } 3108 if (val64 & XGE_HAL_TXDMA_PCC_INT) { 3109 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3110 &isrbar0->pcc_err_reg); 3111 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3112 err, &isrbar0->pcc_err_reg); 3113 /* FIXME: handle register */ 3114 } 3115 if (val64 & XGE_HAL_TXDMA_TTI_INT) { 3116 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3117 &isrbar0->tti_err_reg); 3118 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3119 err, &isrbar0->tti_err_reg); 3120 /* FIXME: handle register */ 3121 } 3122 if (val64 & XGE_HAL_TXDMA_LSO_INT) { 3123 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3124 &isrbar0->lso_err_reg); 3125 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3126 err, &isrbar0->lso_err_reg); 3127 /* FIXME: handle register */ 3128 } 3129 if (val64 & XGE_HAL_TXDMA_TPA_INT) { 3130 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3131 &isrbar0->tpa_err_reg); 3132 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3133 err, &isrbar0->tpa_err_reg); 3134 /* FIXME: handle register */ 3135 } 3136 if (val64 & XGE_HAL_TXDMA_SM_INT) { 3137 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3138 &isrbar0->sm_err_reg); 3139 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3140 err, &isrbar0->sm_err_reg); 3141 /* FIXME: handle register */ 3142 } 3143 3144 return XGE_HAL_OK; 3145 } 3146 3147 /* 3148 * __hal_device_handle_txmac - Handle TxMAC interrupt reason 3149 * @hldev: HAL device handle. 3150 * @reason: interrupt reason 3151 */ 3152 xge_hal_status_e 3153 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason) 3154 { 3155 xge_hal_pci_bar0_t *isrbar0 = 3156 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3157 u64 val64; 3158 3159 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3160 &isrbar0->mac_int_status); 3161 if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT)) 3162 return XGE_HAL_OK; 3163 3164 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3165 &isrbar0->mac_tmac_err_reg); 3166 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3167 val64, &isrbar0->mac_tmac_err_reg); 3168 /* FIXME: handle register */ 3169 3170 return XGE_HAL_OK; 3171 } 3172 3173 /* 3174 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason 3175 * @hldev: HAL device handle. 3176 * @reason: interrupt reason 3177 */ 3178 xge_hal_status_e 3179 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason) 3180 { 3181 /* FIXME: handle register */ 3182 3183 return XGE_HAL_OK; 3184 } 3185 3186 /* 3187 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason 3188 * @hldev: HAL device handle. 
3189 * @reason: interrupt reason 3190 */ 3191 xge_hal_status_e 3192 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason) 3193 { 3194 /* FIXME: handle register */ 3195 3196 return XGE_HAL_OK; 3197 } 3198 3199 /* 3200 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason 3201 * @hldev: HAL device handle. 3202 * @reason: interrupt reason 3203 */ 3204 xge_hal_status_e 3205 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason) 3206 { 3207 xge_hal_pci_bar0_t *isrbar0 = 3208 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3209 u64 val64, err; 3210 3211 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3212 &isrbar0->rxdma_int_status); 3213 if (val64 & XGE_HAL_RXDMA_RC_INT) { 3214 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3215 &isrbar0->rc_err_reg); 3216 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3217 err, &isrbar0->rc_err_reg); 3218 /* FIXME: handle register */ 3219 } 3220 if (val64 & XGE_HAL_RXDMA_RPA_INT) { 3221 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3222 &isrbar0->rpa_err_reg); 3223 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3224 err, &isrbar0->rpa_err_reg); 3225 /* FIXME: handle register */ 3226 } 3227 if (val64 & XGE_HAL_RXDMA_RDA_INT) { 3228 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3229 &isrbar0->rda_err_reg); 3230 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3231 err, &isrbar0->rda_err_reg); 3232 /* FIXME: handle register */ 3233 } 3234 if (val64 & XGE_HAL_RXDMA_RTI_INT) { 3235 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3236 &isrbar0->rti_err_reg); 3237 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3238 err, &isrbar0->rti_err_reg); 3239 /* FIXME: handle register */ 3240 } 3241 3242 return XGE_HAL_OK; 3243 } 3244 3245 /* 3246 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason 3247 * @hldev: HAL device handle. 3248 * @reason: interrupt reason 3249 */ 3250 xge_hal_status_e 3251 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason) 3252 { 3253 xge_hal_pci_bar0_t *isrbar0 = 3254 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3255 u64 val64; 3256 3257 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3258 &isrbar0->mac_int_status); 3259 if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT)) 3260 return XGE_HAL_OK; 3261 3262 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3263 &isrbar0->mac_rmac_err_reg); 3264 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3265 val64, &isrbar0->mac_rmac_err_reg); 3266 3267 /* FIXME: handle register */ 3268 3269 return XGE_HAL_OK; 3270 } 3271 3272 /* 3273 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason 3274 * @hldev: HAL device handle. 3275 * @reason: interrupt reason 3276 */ 3277 xge_hal_status_e 3278 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason) 3279 { 3280 /* FIXME: handle register */ 3281 3282 return XGE_HAL_OK; 3283 } 3284 3285 /** 3286 * xge_hal_device_enable - Enable device. 3287 * @hldev: HAL device handle. 3288 * 3289 * Enable the specified device: bring up the link/interface. 3290 * Returns: XGE_HAL_OK - success. 3291 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device 3292 * to a "quiescent" state. 3293 * 3294 * See also: xge_hal_status_e{}. 3295 * 3296 * Usage: See ex_open{}. 
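 *
 * A minimal, illustrative sketch of the enable path (error handling,
 * channel open and RxD replenish details are ULD-specific and only
 * hinted at here; see ex_open{} for the complete example):
 *
 *    u64 adapter_status = 0;
 *
 *    if (xge_hal_device_enable(hldev) != XGE_HAL_OK) {
 *        (void) xge_hal_device_status(hldev, &adapter_status);
 *        xge_debug_device(XGE_ERR, "enable failed, status 0x%llx",
 *            (unsigned long long)adapter_status);
 *        return;
 *    }
 *    (open channels and replenish RxDs here)
 *    xge_hal_device_intr_enable(hldev);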
3297 */ 3298 xge_hal_status_e 3299 xge_hal_device_enable(xge_hal_device_t *hldev) 3300 { 3301 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3302 u64 val64; 3303 u64 adp_status; 3304 int i, j; 3305 3306 if (!hldev->hw_is_initialized) { 3307 xge_hal_status_e status; 3308 3309 status = __hal_device_hw_initialize(hldev); 3310 if (status != XGE_HAL_OK) { 3311 return status; 3312 } 3313 } 3314 3315 /* 3316 * Not needed in most cases, i.e. 3317 * when device_disable() is followed by reset - 3318 * the latter copies back PCI config space, along with 3319 * the bus mastership - see __hal_device_reset(). 3320 * However, there are/may-in-future be other cases, and 3321 * does not hurt. 3322 */ 3323 __hal_device_bus_master_enable(hldev); 3324 3325 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3326 /* 3327 * Configure the link stability period. 3328 */ 3329 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3330 &bar0->misc_control); 3331 if (hldev->config.link_stability_period != 3332 XGE_HAL_DEFAULT_USE_HARDCODE) { 3333 3334 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( 3335 hldev->config.link_stability_period); 3336 } else { 3337 /* 3338 * Use the link stability period 1 ms as default 3339 */ 3340 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( 3341 XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD); 3342 } 3343 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3344 val64, &bar0->misc_control); 3345 3346 /* 3347 * Clearing any possible Link up/down interrupts that 3348 * could have popped up just before Enabling the card. 3349 */ 3350 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3351 &bar0->misc_int_reg); 3352 if (val64) { 3353 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3354 val64, &bar0->misc_int_reg); 3355 xge_debug_device(XGE_TRACE, "%s","link state cleared"); 3356 } 3357 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 3358 /* 3359 * Clearing any possible Link state change interrupts that 3360 * could have popped up just before Enabling the card. 3361 */ 3362 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3363 &bar0->mac_rmac_err_reg); 3364 if (val64) { 3365 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3366 val64, &bar0->mac_rmac_err_reg); 3367 xge_debug_device(XGE_TRACE, "%s", "link state cleared"); 3368 } 3369 } 3370 3371 if (__hal_device_wait_quiescent(hldev, &val64)) { 3372 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3373 } 3374 3375 /* Enabling Laser. 
*/ 3376 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3377 &bar0->adapter_control); 3378 val64 |= XGE_HAL_ADAPTER_EOI_TX_ON; 3379 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 3380 &bar0->adapter_control); 3381 3382 /* let link establish */ 3383 xge_os_mdelay(1); 3384 3385 /* set link down untill poll() routine will set it up (maybe) */ 3386 hldev->link_state = XGE_HAL_LINK_DOWN; 3387 3388 /* If link is UP (adpter is connected) then enable the adapter */ 3389 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3390 &bar0->adapter_status); 3391 if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 3392 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) { 3393 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3394 &bar0->adapter_control); 3395 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 3396 } else { 3397 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3398 &bar0->adapter_control); 3399 val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON | 3400 XGE_HAL_ADAPTER_LED_ON ); 3401 } 3402 3403 val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */ 3404 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ 3405 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64, 3406 &bar0->adapter_control); 3407 3408 /* We spin here waiting for the Link to come up. 3409 * This is the fix for the Link being unstable after the reset. */ 3410 i = 0; 3411 j = 0; 3412 do 3413 { 3414 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3415 &bar0->adapter_status); 3416 3417 /* Read the adapter control register for Adapter_enable bit */ 3418 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3419 &bar0->adapter_control); 3420 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 3421 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) && 3422 (val64 & XGE_HAL_ADAPTER_CNTL_EN)) { 3423 j++; 3424 if (j >= hldev->config.link_valid_cnt) { 3425 if (xge_hal_device_status(hldev, &adp_status) == 3426 XGE_HAL_OK) { 3427 if (__hal_verify_pcc_idle(hldev, 3428 adp_status) != XGE_HAL_OK) { 3429 return 3430 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3431 } 3432 xge_debug_device(XGE_TRACE, 3433 "adp_status: %llx, link is up on " 3434 "adapter enable!", 3435 (unsigned long long)adp_status); 3436 val64 = xge_os_pio_mem_read64( 3437 hldev->pdev, 3438 hldev->regh0, 3439 &bar0->adapter_control); 3440 val64 = val64| 3441 (XGE_HAL_ADAPTER_EOI_TX_ON | 3442 XGE_HAL_ADAPTER_LED_ON ); 3443 xge_os_pio_mem_write64(hldev->pdev, 3444 hldev->regh0, val64, 3445 &bar0->adapter_control); 3446 xge_os_mdelay(1); 3447 3448 val64 = xge_os_pio_mem_read64( 3449 hldev->pdev, 3450 hldev->regh0, 3451 &bar0->adapter_control); 3452 break; /* out of for loop */ 3453 } else { 3454 return 3455 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3456 } 3457 } 3458 } else { 3459 j = 0; /* Reset the count */ 3460 /* Turn on the Laser */ 3461 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3462 &bar0->adapter_control); 3463 val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON; 3464 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, 3465 val64, &bar0->adapter_control); 3466 3467 xge_os_mdelay(1); 3468 3469 /* Now re-enable it as due to noise, hardware 3470 * turned it off */ 3471 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3472 &bar0->adapter_control); 3473 val64 |= XGE_HAL_ADAPTER_CNTL_EN; 3474 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/ 3475 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 3476 &bar0->adapter_control); 3477 } 3478 xge_os_mdelay(1); /* Sleep for 1 msec */ 3479 i++; 3480 } while (i < 
        hldev->config.link_retry_cnt);

    __hal_device_led_actifity_fix(hldev);

#ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
    /* Here we perform a soft reset of the XGXS to force the link down.
     * Since the link is already up, we will get a link state change
     * poll notification after the adapter is enabled */

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
        0x80010515001E0000ULL, &bar0->dtx_control);
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->dtx_control);
    xge_os_mdelay(1); /* Sleep for 1 msec */

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
        0x80010515001E00E0ULL, &bar0->dtx_control);
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->dtx_control);
    xge_os_mdelay(1); /* Sleep for 1 msec */

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
        0x80070515001F00E4ULL, &bar0->dtx_control);
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->dtx_control);

    xge_os_mdelay(100); /* Sleep for 100 msec */
#else
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
#endif
    {
        /*
         * With some switches the link state change interrupt does not
         * occur even though the xgxs reset is done as per SPN-006. So,
         * poll the adapter status register and check if the link state
         * is ok.
         */
        adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
            &bar0->adapter_status);
        if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
            XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT))) {
            xge_debug_device(XGE_TRACE, "%s",
                "enable device causing link state change ind..");
            (void) __hal_device_handle_link_state_change(hldev);
        }
    }

    if (hldev->config.stats_refresh_time_sec !=
        XGE_HAL_STATS_REFRESH_DISABLE)
        __hal_stats_enable(&hldev->stats);

    return XGE_HAL_OK;
}

/**
 * xge_hal_device_disable - Disable Xframe adapter.
 * @hldev: Device handle.
 *
 * Disable this device. To gracefully reset the adapter, the host should:
 *
 * - call xge_hal_device_disable();
 *
 * - call xge_hal_device_intr_disable();
 *
 * - close all opened channels and clean up outstanding resources;
 *
 * - do some work (error recovery, change mtu, reset, etc);
 *
 * - call xge_hal_device_enable();
 *
 * - open channels, replenish RxDs, etc.
 *
 * - call xge_hal_device_intr_enable().
 *
 * Note: Disabling the device does _not_ include disabling of interrupts.
 * After being disabled, the device stops receiving new frames, but frames
 * that were already in the pipe keep arriving for a few more milliseconds.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
 * a "quiescent" state.
 *
 * See also: xge_hal_status_e{}.
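 *
 * A compact, illustrative sketch of the sequence above (the channel
 * close/re-open steps are ULD-specific and shown only as placeholders):
 *
 *    (void) xge_hal_device_disable(hldev);
 *    xge_hal_device_intr_disable(hldev);
 *    (close channels, drain and free outstanding resources)
 *    (void) xge_hal_device_reset(hldev);
 *    (void) xge_hal_device_enable(hldev);
 *    (re-open channels, replenish RxDs)
 *    xge_hal_device_intr_enable(hldev);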
3564 */ 3565 xge_hal_status_e 3566 xge_hal_device_disable(xge_hal_device_t *hldev) 3567 { 3568 xge_hal_status_e status = XGE_HAL_OK; 3569 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3570 u64 val64; 3571 3572 xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware"); 3573 3574 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3575 &bar0->adapter_control); 3576 val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN); 3577 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 3578 &bar0->adapter_control); 3579 3580 if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) { 3581 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3582 } 3583 3584 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, 3585 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, 3586 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 3587 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); 3588 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3589 } 3590 3591 if (hldev->config.stats_refresh_time_sec != 3592 XGE_HAL_STATS_REFRESH_DISABLE) 3593 __hal_stats_disable(&hldev->stats); 3594 #ifdef XGE_DEBUG_ASSERT 3595 else 3596 xge_assert(!hldev->stats.is_enabled); 3597 #endif 3598 3599 __hal_device_bus_master_disable(hldev); 3600 3601 return status; 3602 } 3603 3604 /** 3605 * xge_hal_device_reset - Reset device. 3606 * @hldev: HAL device handle. 3607 * 3608 * Soft-reset the device, reset the device stats except reset_cnt. 3609 * 3610 * After reset is done, will try to re-initialize HW. 3611 * 3612 * Returns: XGE_HAL_OK - success. 3613 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized. 3614 * XGE_HAL_ERR_RESET_FAILED - Reset failed. 3615 * 3616 * See also: xge_hal_status_e{}. 3617 */ 3618 xge_hal_status_e 3619 xge_hal_device_reset(xge_hal_device_t *hldev) 3620 { 3621 xge_hal_status_e status; 3622 3623 /* increment the soft reset counter */ 3624 u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt; 3625 3626 xge_debug_device(XGE_ERR, "%s (%d)", "resetting the device", reset_cnt); 3627 3628 if (!hldev->is_initialized) 3629 return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED; 3630 3631 /* actual "soft" reset of the adapter */ 3632 status = __hal_device_reset(hldev); 3633 3634 /* reset all stats including saved */ 3635 __hal_stats_soft_reset(hldev, 1); 3636 3637 /* increment reset counter */ 3638 hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1; 3639 3640 /* re-initialize rxufca_intr_thres */ 3641 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 3642 3643 hldev->reset_needed_after_close = 0; 3644 3645 return status; 3646 } 3647 3648 /** 3649 * xge_hal_device_status - Check whether Xframe hardware is ready for 3650 * operation. 3651 * @hldev: HAL device handle. 3652 * @hw_status: Xframe status register. Returned by HAL. 3653 * 3654 * Check whether Xframe hardware is ready for operation. 3655 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest 3656 * hardware functional blocks. 3657 * 3658 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise 3659 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status). 3660 * 3661 * See also: xge_hal_status_e{}. 3662 * Usage: See ex_open{}. 
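 *
 * Illustrative use from a slow path (for example a periodic health
 * check); the recovery action itself is ULD-specific:
 *
 *    u64 hw_status = 0;
 *
 *    if (xge_hal_device_status(hldev, &hw_status) != XGE_HAL_OK) {
 *        (adapter is not ready: log, dump and/or schedule a reset)
 *    }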
3663 */ 3664 xge_hal_status_e 3665 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status) 3666 { 3667 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3668 u64 tmp64; 3669 3670 tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3671 &bar0->adapter_status); 3672 3673 3674 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) { 3675 xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!"); 3676 return XGE_HAL_FAIL; 3677 } 3678 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) { 3679 xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!"); 3680 return XGE_HAL_FAIL; 3681 } 3682 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) { 3683 xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!"); 3684 return XGE_HAL_FAIL; 3685 } 3686 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) { 3687 xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!"); 3688 return XGE_HAL_FAIL; 3689 } 3690 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) { 3691 xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!"); 3692 return XGE_HAL_FAIL; 3693 } 3694 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) { 3695 xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!"); 3696 return XGE_HAL_FAIL; 3697 } 3698 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) { 3699 xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!"); 3700 return XGE_HAL_FAIL; 3701 } 3702 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) { 3703 xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!"); 3704 return XGE_HAL_FAIL; 3705 } 3706 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK)) { 3707 xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!"); 3708 return XGE_HAL_FAIL; 3709 } 3710 3711 *hw_status = tmp64; 3712 3713 return XGE_HAL_OK; 3714 } 3715 3716 3717 /** 3718 * xge_hal_device_intr_enable - Enable Xframe interrupts. 3719 * @hldev: HAL device handle. 3722 * 3723 * Enable Xframe interrupts. The function is to be executed last in the 3724 * Xframe initialization sequence. 3725 * 3726 * See also: xge_hal_device_intr_disable() 3727 */ 3728 void 3729 xge_hal_device_intr_enable(xge_hal_device_t *hldev) 3730 { 3731 xge_list_t *item; 3732 u64 val64; 3733 3734 /* PRC initialization and configuration */ 3735 xge_list_for_each(item, &hldev->ring_channels) { 3736 xge_hal_channel_h channel; 3737 channel = xge_container_of(item, xge_hal_channel_t, item); 3738 __hal_ring_prc_enable(channel); 3739 } 3740 3741 /* enable traffic only interrupts */ 3742 #if defined(XGE_HAL_MSI) 3743 /* 3744 * make sure all interrupts are disabled if MSI 3745 * is enabled. 3746 */ 3747 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); 3748 #else 3749 3750 /* 3751 * Enable the Tx traffic interrupts only if the TTI feature is 3752 * enabled. 3753 */ 3754 val64 = 0; 3755 if (hldev->config.tti.enabled) { 3756 val64 = XGE_HAL_TX_TRAFFIC_INTR; 3757 } 3758 3759 val64 |= XGE_HAL_RX_TRAFFIC_INTR | 3760 XGE_HAL_TX_PIC_INTR | 3761 XGE_HAL_MC_INTR | 3762 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ? 3763 XGE_HAL_SCHED_INTR : 0); 3764 __hal_device_intr_mgmt(hldev, val64, 1); 3765 3766 #endif 3767 xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled"); 3768 } 3769 3770 3771 /** 3772 * xge_hal_device_intr_disable - Disable Xframe interrupts. 3773 * @hldev: HAL device handle.
3776 * 3777 * Disable Xframe interrupts. 3778 * 3779 * See also: xge_hal_device_intr_enable() 3780 */ 3781 void 3782 xge_hal_device_intr_disable(xge_hal_device_t *hldev) 3783 { 3784 xge_list_t *item; 3785 xge_hal_pci_bar0_t *bar0; 3786 u64 val64; 3787 3788 /* 3789 * Disable traffic only interrupts. 3790 * Tx traffic interrupts are used only if the TTI feature is 3791 * enabled. 3792 */ 3793 val64 = 0; 3794 if (hldev->config.tti.enabled) { 3795 val64 = XGE_HAL_TX_TRAFFIC_INTR; 3796 } 3797 3798 val64 |= XGE_HAL_RX_TRAFFIC_INTR | 3799 XGE_HAL_TX_PIC_INTR | 3800 XGE_HAL_MC_INTR | 3801 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ? 3802 XGE_HAL_SCHED_INTR : 0); 3803 __hal_device_intr_mgmt(hldev, val64, 0); 3804 3805 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3806 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3807 0xFFFFFFFFFFFFFFFFULL, 3808 &bar0->general_int_mask); 3809 3810 3811 /* disable all configured PRCs */ 3812 xge_list_for_each(item, &hldev->ring_channels) { 3813 xge_hal_channel_h channel; 3814 channel = xge_container_of(item, xge_hal_channel_t, item); 3815 __hal_ring_prc_disable(channel); 3816 } 3817 3818 xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled"); 3819 } 3820 3821 3822 /** 3823 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses. 3824 * @hldev: HAL device handle. 3825 * 3826 * Enable Xframe multicast addresses. 3827 * Returns: XGE_HAL_OK on success. 3828 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast 3829 * feature within the time(timeout). 3830 * 3831 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}. 3832 */ 3833 xge_hal_status_e 3834 xge_hal_device_mcast_enable(xge_hal_device_t *hldev) 3835 { 3836 u64 val64; 3837 xge_hal_pci_bar0_t *bar0; 3838 3839 if (hldev == NULL) 3840 return XGE_HAL_ERR_INVALID_DEVICE; 3841 3842 if (hldev->mcast_refcnt) 3843 return XGE_HAL_OK; 3844 3845 hldev->mcast_refcnt = 1; 3846 3847 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3848 3849 /* Enable all Multicast addresses */ 3850 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3851 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL), 3852 &bar0->rmac_addr_data0_mem); 3853 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3854 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL), 3855 &bar0->rmac_addr_data1_mem); 3856 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE | 3857 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3858 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET( 3859 XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET); 3860 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 3861 &bar0->rmac_addr_cmd_mem); 3862 3863 if (__hal_device_register_poll(hldev, 3864 &bar0->rmac_addr_cmd_mem, 0, 3865 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 3866 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 3867 /* upper layer may require to repeat */ 3868 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 3869 } 3870 3871 return XGE_HAL_OK; 3872 } 3873 3874 /** 3875 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses. 3876 * @hldev: HAL device handle. 3877 * 3878 * Disable Xframe multicast addresses. 3879 * Returns: XGE_HAL_OK - success. 3880 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast 3881 * feature within the time(timeout). 3882 * 3883 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}. 
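 *
 * A small usage sketch (illustrative; the retry policy is the caller's
 * choice, not something this HAL prescribes):
 *
 *	if (xge_hal_device_mcast_disable(my_hldev) ==
 *	    XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING)
 *		... the command memory was still busy; repeat the call later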
3884 */ 3885 xge_hal_status_e 3886 xge_hal_device_mcast_disable(xge_hal_device_t *hldev) 3887 { 3888 u64 val64; 3889 xge_hal_pci_bar0_t *bar0; 3890 3891 if (hldev == NULL) 3892 return XGE_HAL_ERR_INVALID_DEVICE; 3893 3894 if (hldev->mcast_refcnt == 0) 3895 return XGE_HAL_OK; 3896 3897 hldev->mcast_refcnt = 0; 3898 3899 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3900 3901 /* Disable all Multicast addresses */ 3902 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3903 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL), 3904 &bar0->rmac_addr_data0_mem); 3905 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3906 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0), 3907 &bar0->rmac_addr_data1_mem); 3908 3909 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE | 3910 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 3911 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET( 3912 XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET); 3913 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 3914 &bar0->rmac_addr_cmd_mem); 3915 3916 if (__hal_device_register_poll(hldev, 3917 &bar0->rmac_addr_cmd_mem, 0, 3918 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 3919 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 3920 /* upper layer may require to repeat */ 3921 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 3922 } 3923 3924 return XGE_HAL_OK; 3925 } 3926 3927 /** 3928 * xge_hal_device_promisc_enable - Enable promiscuous mode. 3929 * @hldev: HAL device handle. 3930 * 3931 * Enable promiscuous mode of Xframe operation. 3932 * 3933 * See also: xge_hal_device_promisc_disable(). 3934 */ 3935 void 3936 xge_hal_device_promisc_enable(xge_hal_device_t *hldev) 3937 { 3938 u64 val64; 3939 xge_hal_pci_bar0_t *bar0; 3940 3941 xge_assert(hldev); 3942 3943 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3944 3945 if (!hldev->is_promisc) { 3946 /* Put the NIC into promiscuous mode */ 3947 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3948 &bar0->mac_cfg); 3949 val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; 3950 3951 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3952 XGE_HAL_RMAC_CFG_KEY(0x4C0D), 3953 &bar0->rmac_cfg_key); 3954 3955 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 3956 (u32)(val64 >> 32), 3957 &bar0->mac_cfg); 3958 3959 hldev->is_promisc = 1; 3960 xge_debug_device(XGE_TRACE, 3961 "mac_cfg 0x%llx: promisc enabled", 3962 (unsigned long long)val64); 3963 } 3964 } 3965 3966 /** 3967 * xge_hal_device_promisc_disable - Disable promiscuous mode. 3968 * @hldev: HAL device handle. 3969 * 3970 * Disable promiscuous mode of Xframe operation. 3971 * 3972 * See also: xge_hal_device_promisc_enable(). 3973 */ 3974 void 3975 xge_hal_device_promisc_disable(xge_hal_device_t *hldev) 3976 { 3977 u64 val64; 3978 xge_hal_pci_bar0_t *bar0; 3979 3980 xge_assert(hldev); 3981 3982 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3983 3984 if (hldev->is_promisc) { 3985 /* Remove the NIC from promiscuous mode */ 3986 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3987 &bar0->mac_cfg); 3988 val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; 3989 3990 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3991 XGE_HAL_RMAC_CFG_KEY(0x4C0D), 3992 &bar0->rmac_cfg_key); 3993 3994 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 3995 (u32)(val64 >> 32), 3996 &bar0->mac_cfg); 3997 3998 hldev->is_promisc = 0; 3999 xge_debug_device(XGE_TRACE, 4000 "mac_cfg 0x%llx: promisc disabled", 4001 (unsigned long long)val64); 4002 } 4003 } 4004 4005 /** 4006 * xge_hal_device_macaddr_get - Get MAC addresses. 4007 * @hldev: HAL device handle. 
4008 * @index: MAC address index, in the range from 0 to 4009 * XGE_HAL_MAX_MAC_ADDRESSES. 4010 * @macaddr: MAC address. Returned by HAL. 4011 * 4012 * Retrieve one of the stored MAC addresses by reading non-volatile 4013 * memory on the chip. 4014 * 4015 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported. 4016 * 4017 * Returns: XGE_HAL_OK - success. 4018 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac 4019 * address within the time(timeout). 4020 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index. 4021 * 4022 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}. 4023 */ 4024 xge_hal_status_e 4025 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index, 4026 macaddr_t *macaddr) 4027 { 4028 xge_hal_pci_bar0_t *bar0 = 4029 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4030 u64 val64; 4031 int i; 4032 4033 if (hldev == NULL) { 4034 return XGE_HAL_ERR_INVALID_DEVICE; 4035 } 4036 4037 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) { 4038 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 4039 } 4040 4041 #ifdef XGE_HAL_HERC_EMULATION 4042 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000, 4043 &bar0->rmac_addr_data0_mem); 4044 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000, 4045 &bar0->rmac_addr_data1_mem); 4046 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD | 4047 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4048 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)); 4049 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4050 &bar0->rmac_addr_cmd_mem); 4051 4052 /* poll until done */ 4053 __hal_device_register_poll(hldev, 4054 &bar0->rmac_addr_cmd_mem, 0, 4055 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD, 4056 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS); 4057 4058 #endif 4059 4060 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD | 4061 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4062 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); 4063 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4064 &bar0->rmac_addr_cmd_mem); 4065 4066 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0, 4067 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4068 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4069 /* upper layer may require to repeat */ 4070 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 4071 } 4072 4073 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4074 &bar0->rmac_addr_data0_mem); 4075 for (i=0; i < XGE_HAL_ETH_ALEN; i++) { 4076 (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8))); 4077 } 4078 4079 #ifdef XGE_HAL_HERC_EMULATION 4080 for (i=0; i < XGE_HAL_ETH_ALEN; i++) { 4081 (*macaddr)[i] = (u8)0; 4082 } 4083 (*macaddr)[1] = (u8)1; 4084 4085 #endif 4086 4087 return XGE_HAL_OK; 4088 } 4089 4090 /** 4091 * xge_hal_device_macaddr_set - Set MAC address. 4092 * @hldev: HAL device handle. 4093 * @index: MAC address index, in the range from 0 to 4094 * XGE_HAL_MAX_MAC_ADDRESSES. 4095 * @macaddr: New MAC address to configure. 4096 * 4097 * Configure one of the available MAC address "slots". 4098 * 4099 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported. 4100 * 4101 * Returns: XGE_HAL_OK - success. 4102 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac 4103 * address within the time(timeout). 4104 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index. 4105 * 4106 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}. 
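 *
 * A hedged example (the address value and the slot index are arbitrary
 * placeholders):
 *
 *	macaddr_t mac = {0x00, 0x0c, 0xfc, 0x00, 0x00, 0x01};
 *	if (xge_hal_device_macaddr_set(my_hldev, 1, mac) == XGE_HAL_OK)
 *		... slot 1 now holds the address; it can later be located
 *		    with xge_hal_device_macaddr_find()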
4107 */ 4108 xge_hal_status_e 4109 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index, 4110 macaddr_t macaddr) 4111 { 4112 xge_hal_pci_bar0_t *bar0 = 4113 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4114 u64 val64, temp64; 4115 int i; 4116 4117 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) 4118 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 4119 4120 temp64 = 0; 4121 for (i=0; i < XGE_HAL_ETH_ALEN; i++) { 4122 temp64 |= macaddr[i]; 4123 temp64 <<= 8; 4124 } 4125 temp64 >>= 8; 4126 4127 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4128 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64), 4129 &bar0->rmac_addr_data0_mem); 4130 4131 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4132 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL), 4133 &bar0->rmac_addr_data1_mem); 4134 4135 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE | 4136 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4137 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); 4138 4139 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4140 &bar0->rmac_addr_cmd_mem); 4141 4142 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0, 4143 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4144 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4145 /* upper layer may require to repeat */ 4146 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 4147 } 4148 4149 return XGE_HAL_OK; 4150 } 4151 4152 /** 4153 * xge_hal_device_macaddr_find - Finds index in the rmac table. 4154 * @hldev: HAL device handle. 4155 * @wanted: Wanted MAC address. 4156 * 4157 * See also: xge_hal_device_macaddr_set(). 4158 */ 4159 int 4160 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted) 4161 { 4162 int i; 4163 4164 if (hldev == NULL) { 4165 return XGE_HAL_ERR_INVALID_DEVICE; 4166 } 4167 4168 for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) { 4169 macaddr_t macaddr; 4170 (void) xge_hal_device_macaddr_get(hldev, i, &macaddr); 4171 if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) { 4172 return i; 4173 } 4174 } 4175 4176 return -1; 4177 } 4178 4179 /** 4180 * xge_hal_device_mtu_set - Set MTU. 4181 * @hldev: HAL device handle. 4182 * @new_mtu: New MTU size to configure. 4183 * 4184 * Set new MTU value. For example, to use jumbo frames: 4185 * xge_hal_device_mtu_set(my_device, 9600); 4186 * 4187 * Returns: XGE_HAL_OK on success. 4188 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control 4189 * register. 4190 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI 4191 * schemes. 4192 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to 4193 * a "quiescent" state. 4194 */ 4195 xge_hal_status_e 4196 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu) 4197 { 4198 xge_hal_status_e status; 4199 4200 /* 4201 * reset needed if 1) new MTU differs, and 4202 * 2a) device was closed or 4203 * 2b) device is being upped for first time. 4204 */ 4205 if (hldev->config.mtu != new_mtu) { 4206 if (hldev->reset_needed_after_close || 4207 !hldev->mtu_first_time_set) { 4208 status = xge_hal_device_reset(hldev); 4209 if (status != XGE_HAL_OK) { 4210 xge_debug_device(XGE_TRACE, "%s", 4211 "fatal: can not reset the device"); 4212 return status; 4213 } 4214 } 4215 /* store the new MTU in device, reset will use it */ 4216 hldev->config.mtu = new_mtu; 4217 xge_debug_device(XGE_TRACE, "new MTU %d applied", 4218 new_mtu); 4219 } 4220 4221 if (!hldev->mtu_first_time_set) 4222 hldev->mtu_first_time_set = 1; 4223 4224 return XGE_HAL_OK; 4225 } 4226 4227 /** 4228 * xge_hal_device_initialize - Initialize Xframe device.
4229 * @hldev: HAL device handle. 4230 * @attr: pointer to xge_hal_device_attr_t structure 4231 * @device_config: Configuration to be _applied_ to the device, 4232 * For the Xframe configuration "knobs" please 4233 * refer to xge_hal_device_config_t and Xframe 4234 * User Guide. 4235 * 4236 * Initialize Xframe device. Note that all the arguments of this public API 4237 * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with 4238 * OS to find new Xframe device, locate its PCI and memory spaces. 4239 * 4240 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL 4241 * to enable the latter to perform Xframe hardware initialization. 4242 * 4243 * Returns: XGE_HAL_OK - success. 4244 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized. 4245 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not 4246 * valid. 4247 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed. 4248 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid. 4249 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid. 4250 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac 4251 * address within the time(timeout) or TTI/RTI initialization failed. 4252 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control. 4253 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent. 4254 * 4255 * See also: xge_hal_device_terminate(), xge_hal_status_e{} 4256 * xge_hal_device_attr_t{}. 4257 */ 4258 xge_hal_status_e 4259 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, 4260 xge_hal_device_config_t *device_config) 4261 { 4262 int i; 4263 xge_hal_status_e status; 4264 xge_hal_channel_t *channel; 4265 u16 subsys_device; 4266 u16 subsys_vendor; 4267 int total_dram_size, ring_auto_dram_cfg, left_dram_size; 4268 int total_dram_size_max = 0; 4269 4270 xge_debug_device(XGE_TRACE, "device 0x%llx is initializing", 4271 (unsigned long long)(ulong_t)hldev); 4272 4273 /* sanity check */ 4274 if (g_xge_hal_driver == NULL || 4275 !g_xge_hal_driver->is_initialized) { 4276 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; 4277 } 4278 4279 xge_os_memzero(hldev, sizeof(xge_hal_device_t)); 4280 4281 /* 4282 * validate a common part of Xframe-I/II configuration 4283 * (and run check_card() later, once PCI inited - see below) 4284 */ 4285 status = __hal_device_config_check_common(device_config); 4286 if (status != XGE_HAL_OK) 4287 return status; 4288 4289 /* apply config */ 4290 xge_os_memcpy(&hldev->config, device_config, 4291 sizeof(xge_hal_device_config_t)); 4292 4293 /* save original attr */ 4294 xge_os_memcpy(&hldev->orig_attr, attr, 4295 sizeof(xge_hal_device_attr_t)); 4296 4297 /* initialize rxufca_intr_thres */ 4298 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 4299 4300 hldev->regh0 = attr->regh0; 4301 hldev->regh1 = attr->regh1; 4302 hldev->regh2 = attr->regh2; 4303 hldev->isrbar0 = hldev->bar0 = attr->bar0; 4304 hldev->bar1 = attr->bar1; 4305 hldev->bar2 = attr->bar2; 4306 hldev->pdev = attr->pdev; 4307 hldev->irqh = attr->irqh; 4308 hldev->cfgh = attr->cfgh; 4309 4310 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh, 4311 g_xge_hal_driver->config.queue_size_initial, 4312 g_xge_hal_driver->config.queue_size_max, 4313 __hal_device_event_queued, hldev); 4314 if (hldev->queueh == NULL) 4315 return XGE_HAL_ERR_OUT_OF_MEMORY; 4316 4317 hldev->magic = XGE_HAL_MAGIC; 4318 4319 xge_assert(hldev->regh0); 4320 xge_assert(hldev->regh1); 4321 xge_assert(hldev->bar0); 4322 xge_assert(hldev->bar1); 4323 
xge_assert(hldev->pdev); 4324 xge_assert(hldev->irqh); 4325 xge_assert(hldev->cfgh); 4326 4327 /* initialize some PCI/PCI-X fields of this PCI device. */ 4328 __hal_device_pci_init(hldev); 4329 4330 /* 4331 * initlialize lists to properly handling a potential 4332 * terminate request 4333 */ 4334 xge_list_init(&hldev->free_channels); 4335 xge_list_init(&hldev->fifo_channels); 4336 xge_list_init(&hldev->ring_channels); 4337 4338 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 4339 /* fixups for xena */ 4340 hldev->config.rth_en = 0; 4341 hldev->config.rth_spdm_en = 0; 4342 hldev->config.rts_mac_en = 0; 4343 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; 4344 4345 status = __hal_device_config_check_xena(device_config); 4346 if (status != XGE_HAL_OK) { 4347 xge_hal_device_terminate(hldev); 4348 return status; 4349 } 4350 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 4351 /* fixups for herc */ 4352 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; 4353 status = __hal_device_config_check_herc(device_config); 4354 if (status != XGE_HAL_OK) { 4355 xge_hal_device_terminate(hldev); 4356 return status; 4357 } 4358 } else { 4359 xge_debug_device(XGE_ERR, 4360 "detected unknown device_id 0x%x", hldev->device_id); 4361 xge_hal_device_terminate(hldev); 4362 return XGE_HAL_ERR_BAD_DEVICE_ID; 4363 } 4364 4365 /* allocate and initialize FIFO types of channels according to 4366 * configuration */ 4367 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { 4368 if (!device_config->fifo.queue[i].configured) 4369 continue; 4370 4371 channel = __hal_channel_allocate(hldev, i, 4372 XGE_HAL_CHANNEL_TYPE_FIFO); 4373 if (channel == NULL) { 4374 xge_debug_device(XGE_ERR, 4375 "fifo: __hal_channel_allocate failed"); 4376 xge_hal_device_terminate(hldev); 4377 return XGE_HAL_ERR_OUT_OF_MEMORY; 4378 } 4379 /* add new channel to the device */ 4380 xge_list_insert(&channel->item, &hldev->free_channels); 4381 } 4382 4383 /* 4384 * automatic DRAM adjustment 4385 */ 4386 total_dram_size = 0; 4387 ring_auto_dram_cfg = 0; 4388 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 4389 if (!device_config->ring.queue[i].configured) 4390 continue; 4391 if (device_config->ring.queue[i].dram_size_mb == 4392 XGE_HAL_DEFAULT_USE_HARDCODE) { 4393 ring_auto_dram_cfg++; 4394 continue; 4395 } 4396 total_dram_size += device_config->ring.queue[i].dram_size_mb; 4397 } 4398 left_dram_size = total_dram_size_max - total_dram_size; 4399 if (left_dram_size < 0 || 4400 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) { 4401 xge_debug_device(XGE_ERR, 4402 "ring config: exceeded DRAM size %d MB", 4403 total_dram_size_max); 4404 xge_hal_device_terminate(hldev); 4405 return XGE_HAL_BADCFG_RING_QUEUE_SIZE; 4406 } 4407 4408 /* 4409 * allocate and initialize RING types of channels according to 4410 * configuration 4411 */ 4412 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 4413 if (!device_config->ring.queue[i].configured) 4414 continue; 4415 4416 if (device_config->ring.queue[i].dram_size_mb == 4417 XGE_HAL_DEFAULT_USE_HARDCODE) { 4418 hldev->config.ring.queue[i].dram_size_mb = 4419 device_config->ring.queue[i].dram_size_mb = 4420 left_dram_size / ring_auto_dram_cfg; 4421 } 4422 4423 channel = __hal_channel_allocate(hldev, i, 4424 XGE_HAL_CHANNEL_TYPE_RING); 4425 if (channel == NULL) { 4426 xge_debug_device(XGE_ERR, 4427 "ring: __hal_channel_allocate failed"); 4428 xge_hal_device_terminate(hldev); 4429 return XGE_HAL_ERR_OUT_OF_MEMORY; 4430 } 4431 /* add new channel to the device */ 4432 
xge_list_insert(&channel->item, &hldev->free_channels); 4433 } 4434 4435 /* get subsystem IDs */ 4436 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 4437 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), 4438 &subsys_device); 4439 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 4440 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), 4441 &subsys_vendor); 4442 xge_debug_device(XGE_TRACE, 4443 "subsystem_id %04x:%04x", 4444 subsys_vendor, subsys_device); 4445 4446 /* reset device initially */ 4447 (void) __hal_device_reset(hldev); 4448 4449 /* set host endian before, to assure proper action */ 4450 status = __hal_device_set_swapper(hldev); 4451 if (status != XGE_HAL_OK) { 4452 xge_debug_device(XGE_ERR, 4453 "__hal_device_set_swapper failed"); 4454 xge_hal_device_terminate(hldev); 4455 (void) __hal_device_reset(hldev); 4456 return status; 4457 } 4458 4459 #ifndef XGE_HAL_HERC_EMULATION 4460 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 4461 __hal_device_xena_fix_mac(hldev); 4462 #endif 4463 4464 /* MAC address initialization. 4465 * For now only one mac address will be read and used. */ 4466 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]); 4467 if (status != XGE_HAL_OK) { 4468 xge_debug_device(XGE_ERR, 4469 "xge_hal_device_macaddr_get failed"); 4470 xge_hal_device_terminate(hldev); 4471 return status; 4472 } 4473 4474 if (hldev->macaddr[0][0] == 0xFF && 4475 hldev->macaddr[0][1] == 0xFF && 4476 hldev->macaddr[0][2] == 0xFF && 4477 hldev->macaddr[0][3] == 0xFF && 4478 hldev->macaddr[0][4] == 0xFF && 4479 hldev->macaddr[0][5] == 0xFF) { 4480 xge_debug_device(XGE_ERR, 4481 "xge_hal_device_macaddr_get returns all FFs"); 4482 xge_hal_device_terminate(hldev); 4483 return XGE_HAL_ERR_INVALID_MAC_ADDRESS; 4484 } 4485 4486 xge_debug_device(XGE_TRACE, 4487 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", 4488 hldev->macaddr[0][0], hldev->macaddr[0][1], 4489 hldev->macaddr[0][2], hldev->macaddr[0][3], 4490 hldev->macaddr[0][4], hldev->macaddr[0][5]); 4491 4492 status = __hal_stats_initialize(&hldev->stats, hldev); 4493 if (status != XGE_HAL_OK) { 4494 xge_debug_device(XGE_ERR, 4495 "__hal_stats_initialize failed"); 4496 xge_hal_device_terminate(hldev); 4497 return status; 4498 } 4499 4500 status = __hal_device_hw_initialize(hldev); 4501 if (status != XGE_HAL_OK) { 4502 xge_debug_device(XGE_ERR, 4503 "__hal_device_hw_initialize failed"); 4504 xge_hal_device_terminate(hldev); 4505 return status; 4506 } 4507 4508 hldev->dump_buf = xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE); 4509 if (hldev->dump_buf == NULL) { 4510 xge_debug_device(XGE_ERR, 4511 "__hal_device_hw_initialize failed"); 4512 xge_hal_device_terminate(hldev); 4513 return XGE_HAL_ERR_OUT_OF_MEMORY; 4514 } 4515 4516 4517 /* Xena-only: need to serialize fifo posts across all device fifos */ 4518 #if defined(XGE_HAL_TX_MULTI_POST) 4519 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev); 4520 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ) 4521 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh); 4522 #endif 4523 4524 hldev->is_initialized = 1; 4525 4526 return XGE_HAL_OK; 4527 } 4528 4529 /** 4530 * xge_hal_device_terminating - Mark the device as 'terminating'. 4531 * @devh: HAL device handle. 4532 * 4533 * Mark the device as 'terminating', going to terminate. Can be used 4534 * to serialize termination with other running processes/contexts. 4535 * 4536 * See also: xge_hal_device_terminate(). 
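 *
 * A typical teardown sketch (illustrative; interrupt disabling and channel
 * close calls are the ULD's responsibility):
 *
 *	xge_hal_device_terminating(my_hldev);	... block new activity
 *	... disable interrupts, close all channels
 *	xge_hal_device_terminate(my_hldev);	... free HAL resources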
4537 */ 4538 void 4539 xge_hal_device_terminating(xge_hal_device_h devh) 4540 { 4541 xge_hal_device_t *hldev = (xge_hal_device_t*)devh; 4542 hldev->terminating = 1; 4543 } 4544 4545 /** 4546 * xge_hal_device_terminate - Terminate Xframe device. 4547 * @hldev: HAL device handle. 4548 * 4549 * Terminate HAL device. 4550 * 4551 * See also: xge_hal_device_initialize(). 4552 */ 4553 void 4554 xge_hal_device_terminate(xge_hal_device_t *hldev) 4555 { 4556 xge_assert(g_xge_hal_driver != NULL); 4557 xge_assert(hldev != NULL); 4558 xge_assert(hldev->magic == XGE_HAL_MAGIC); 4559 4560 xge_queue_flush(hldev->queueh); 4561 4562 hldev->terminating = 1; 4563 hldev->is_initialized = 0; 4564 hldev->in_poll = 0; 4565 hldev->magic = XGE_HAL_DEAD; 4566 4567 #if defined(XGE_HAL_TX_MULTI_POST) 4568 xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev); 4569 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ) 4570 xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev); 4571 #endif 4572 4573 xge_debug_device(XGE_TRACE, "device %llx is terminating", 4574 (unsigned long long)(ulong_t)hldev); 4575 4576 xge_assert(xge_list_is_empty(&hldev->fifo_channels)); 4577 xge_assert(xge_list_is_empty(&hldev->ring_channels)); 4578 4579 if (hldev->stats.is_initialized) { 4580 __hal_stats_terminate(&hldev->stats); 4581 } 4582 4583 /* close if open and free all channels */ 4584 while (!xge_list_is_empty(&hldev->free_channels)) { 4585 xge_hal_channel_t *channel = (xge_hal_channel_t*) 4586 hldev->free_channels.next; 4587 4588 xge_assert(!channel->is_open); 4589 xge_list_remove(&channel->item); 4590 __hal_channel_free(channel); 4591 } 4592 4593 if (hldev->queueh) { 4594 xge_queue_destroy(hldev->queueh); 4595 } 4596 4597 if (hldev->spdm_table) { 4598 xge_os_free(hldev->pdev, 4599 hldev->spdm_table[0], 4600 (sizeof(xge_hal_spdm_entry_t) * 4601 hldev->spdm_max_entries)); 4602 xge_os_free(hldev->pdev, 4603 hldev->spdm_table, 4604 (sizeof(xge_hal_spdm_entry_t *) * 4605 hldev->spdm_max_entries)); 4606 xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev); 4607 hldev->spdm_table = NULL; 4608 } 4609 4610 if (hldev->dump_buf) { 4611 xge_os_free(hldev->pdev, hldev->dump_buf, 4612 XGE_HAL_DUMP_BUF_SIZE); 4613 hldev->dump_buf = NULL; 4614 } 4615 } 4616 4617 /** 4618 * xge_hal_device_handle_tcode - Handle transfer code. 4619 * @channelh: Channel handle. 4620 * @dtrh: Descriptor handle. 4621 * @t_code: One of the enumerated (and documented in the Xframe user guide) 4622 * "transfer codes". 4623 * 4624 * Handle descriptor's transfer code. The latter comes with each completed 4625 * descriptor, see xge_hal_fifo_dtr_next_completed() and 4626 * xge_hal_ring_dtr_next_completed(). 4627 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h. 4628 * 4629 * Returns: one of the xge_hal_status_e{} enumerated types. 4630 * XGE_HAL_OK - for success. 4631 * XGE_HAL_ERR_CRITICAL - when encounters critical error. 
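 *
 * A hedged sketch of the intended use inside a receive completion loop
 * (the recovery step is a placeholder; see also
 * xge_hal_ring_dtr_next_completed()):
 *
 *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code) ==
 *	    XGE_HAL_OK) {
 *		if (xge_hal_device_handle_tcode(channelh, dtrh, t_code) ==
 *		    XGE_HAL_ERR_CRITICAL)
 *			... schedule an adapter reset
 *		... otherwise process the completed descriptor
 *	}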
4632 */ 4633 xge_hal_status_e 4634 xge_hal_device_handle_tcode (xge_hal_channel_h channelh, 4635 xge_hal_dtr_h dtrh, u8 t_code) 4636 { 4637 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 4638 xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh; 4639 4640 if (t_code > 15) { 4641 xge_os_printf("invalid t_code %d", t_code); 4642 return XGE_HAL_OK; 4643 } 4644 4645 if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { 4646 hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++; 4647 4648 #if defined(XGE_HAL_DEBUG_BAD_TCODE) 4649 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; 4650 xge_os_printf("%llx:%llx:%llx:%llx", 4651 txdp->control_1, txdp->control_2, txdp->buffer_pointer, 4652 txdp->host_control); 4653 #endif 4654 4655 /* handle link "down" immediately without going through 4656 * xge_hal_device_poll() routine. */ 4657 if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) { 4658 /* link is down */ 4659 if (hldev->link_state != XGE_HAL_LINK_DOWN) { 4660 xge_hal_pci_bar0_t *bar0 = 4661 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4662 u64 val64; 4663 4664 hldev->link_state = XGE_HAL_LINK_DOWN; 4665 4666 val64 = xge_os_pio_mem_read64(hldev->pdev, 4667 hldev->regh0, &bar0->adapter_control); 4668 4669 /* turn off LED */ 4670 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 4671 xge_os_pio_mem_write64(hldev->pdev, 4672 hldev->regh0, val64, 4673 &bar0->adapter_control); 4674 4675 g_xge_hal_driver->uld_callbacks.link_down( 4676 hldev->upper_layer_info); 4677 } 4678 } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER || 4679 t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) { 4680 __hal_device_handle_targetabort(hldev); 4681 return XGE_HAL_ERR_CRITICAL; 4682 } 4683 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 4684 hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++; 4685 4686 #if defined(XGE_HAL_DEBUG_BAD_TCODE) 4687 xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; 4688 xge_os_printf("%llx:%llx:%llx:%llx", rxdp->control_1, 4689 rxdp->control_2, rxdp->buffer0_ptr, 4690 rxdp->host_control); 4691 #endif 4692 if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) { 4693 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 4694 __hal_device_handle_eccerr(hldev, "rxd_t_code", 4695 (u64)t_code); 4696 return XGE_HAL_ERR_CRITICAL; 4697 } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY || 4698 t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) { 4699 hldev->stats.sw_dev_err_stats.parity_err_cnt++; 4700 __hal_device_handle_parityerr(hldev, "rxd_t_code", 4701 (u64)t_code); 4702 return XGE_HAL_ERR_CRITICAL; 4703 } 4704 } 4705 return XGE_HAL_OK; 4706 } 4707 4708 /** 4709 * xge_hal_device_link_state - Get link state. 4710 * @devh: HAL device handle. 4711 * @ls: Link state, see xge_hal_device_link_state_e{}. 4712 * 4713 * Get link state. 4714 * Returns: XGE_HAL_OK. 4715 * See also: xge_hal_device_link_state_e{}. 4716 */ 4717 xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh, 4718 xge_hal_device_link_state_e *ls) 4719 { 4720 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 4721 4722 xge_assert(ls != NULL); 4723 *ls = hldev->link_state; 4724 return XGE_HAL_OK; 4725 } 4726 4727 /** 4728 * xge_hal_device_sched_timer - Configure scheduled device interrupt. 4729 * @devh: HAL device handle. 4730 * @interval_us: Time interval, in miscoseconds. 4731 * Unlike transmit and receive interrupts, 4732 * the scheduled interrupt is generated independently of 4733 * traffic, but purely based on time. 4734 * @one_shot: 1 - generate scheduled interrupt only once. 
4735 * 0 - generate scheduled interrupt periodically at the specified 4736 * @interval_us interval. 4737 * 4738 * (Re-)configure scheduled interrupt. Can be called at runtime to change 4739 * the setting, generate one-shot interrupts based on the resource and/or 4740 * traffic conditions, other purposes. 4741 * See also: xge_hal_device_config_t{}. 4742 */ 4743 void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us, 4744 int one_shot) 4745 { 4746 u64 val64; 4747 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 4748 xge_hal_pci_bar0_t *bar0 = 4749 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4750 unsigned int interval = hldev->config.pci_freq_mherz * interval_us; 4751 4752 interval = __hal_fix_time_ival_herc(hldev, interval); 4753 4754 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4755 &bar0->scheduled_int_ctrl); 4756 if (interval) { 4757 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK; 4758 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval); 4759 if (one_shot) { 4760 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT; 4761 } 4762 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 4763 } else { 4764 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 4765 } 4766 4767 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4768 val64, &bar0->scheduled_int_ctrl); 4769 4770 xge_debug_device(XGE_TRACE, "sched_timer 0x%llx: %s", 4771 (unsigned long long)val64, 4772 interval ? "enabled" : "disabled"); 4773 } 4774 4775 /** 4776 * xge_hal_device_check_id - Verify device ID. 4777 * @devh: HAL device handle. 4778 * 4779 * Verify device ID. 4780 * Returns: one of the xge_hal_card_e{} enumerated types. 4781 * See also: xge_hal_card_e{}. 4782 */ 4783 xge_hal_card_e 4784 xge_hal_device_check_id(xge_hal_device_h devh) 4785 { 4786 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 4787 switch (hldev->device_id) { 4788 case XGE_PCI_DEVICE_ID_XENA_1: 4789 case XGE_PCI_DEVICE_ID_XENA_2: 4790 return XGE_HAL_CARD_XENA; 4791 case XGE_PCI_DEVICE_ID_HERC_1: 4792 case XGE_PCI_DEVICE_ID_HERC_2: 4793 return XGE_HAL_CARD_HERC; 4794 default: 4795 return XGE_HAL_CARD_UNKNOWN; 4796 } 4797 } 4798 4799 /** 4800 * xge_hal_device_pci_info_get - Get PCI bus informations such as width, 4801 * frequency, and mode from previously stored values. 4802 * @devh: HAL device handle. 4803 * @pci_mode: pointer to a variable of enumerated type 4804 * xge_hal_pci_mode_e{}. 4805 * @bus_frequency: pointer to a variable of enumerated type 4806 * xge_hal_pci_bus_frequency_e{}. 4807 * @bus_width: pointer to a variable of enumerated type 4808 * xge_hal_pci_bus_width_e{}. 4809 * 4810 * Get pci mode, frequency, and PCI bus width. 4811 * Returns: one of the xge_hal_status_e{} enumerated types. 4812 * XGE_HAL_OK - for success. 4813 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. 4814 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 
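 *
 * A minimal usage sketch (illustrative; "my_devh" is a placeholder):
 *
 *	xge_hal_pci_mode_e mode;
 *	xge_hal_pci_bus_frequency_e freq;
 *	xge_hal_pci_bus_width_e width;
 *	if (xge_hal_device_pci_info_get(my_devh, &mode, &freq, &width) ==
 *	    XGE_HAL_OK)
 *		... e.g. log the negotiated PCI/PCI-X mode and frequency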
4815 */ 4816 xge_hal_status_e 4817 xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 4818 xge_hal_pci_bus_frequency_e *bus_frequency, 4819 xge_hal_pci_bus_width_e *bus_width) 4820 { 4821 xge_hal_status_e rc_status; 4822 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 4823 4824 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) { 4825 rc_status = XGE_HAL_ERR_INVALID_DEVICE; 4826 xge_debug_device(XGE_ERR, 4827 "xge_hal_device_pci_info_get error, rc %d for device %p", 4828 rc_status, hldev); 4829 4830 return rc_status; 4831 } 4832 4833 *pci_mode = hldev->pci_mode; 4834 *bus_frequency = hldev->bus_frequency; 4835 *bus_width = hldev->bus_width; 4836 rc_status = XGE_HAL_OK; 4837 return rc_status; 4838 } 4839 4840 /** 4841 * xge_hal_reinitialize_hw 4842 * @hldev: private member of the device structure. 4843 * 4844 * This function will soft reset the NIC and re-initalize all the 4845 * I/O registers to the values they had after it's inital initialization 4846 * through the probe function. 4847 */ 4848 int xge_hal_reinitialize_hw(xge_hal_device_t * hldev) 4849 { 4850 (void) xge_hal_device_reset(hldev); 4851 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) { 4852 xge_hal_device_terminate(hldev); 4853 (void) __hal_device_reset(hldev); 4854 return 1; 4855 } 4856 return 0; 4857 } 4858 4859 4860 /* 4861 * __hal_read_spdm_entry_line 4862 * @hldev: pointer to xge_hal_device_t structure 4863 * @spdm_line: spdm line in the spdm entry to be read. 4864 * @spdm_entry: spdm entry of the spdm_line in the SPDM table. 4865 * @spdm_line_val: Contains the value stored in the spdm line. 4866 * 4867 * SPDM table contains upto a maximum of 256 spdm entries. 4868 * Each spdm entry contains 8 lines and each line stores 8 bytes. 4869 * This function reads the spdm line(addressed by @spdm_line) 4870 * of the spdm entry(addressed by @spdm_entry) in 4871 * the SPDM table. 4872 */ 4873 xge_hal_status_e 4874 __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line, 4875 u16 spdm_entry, u64 *spdm_line_val) 4876 { 4877 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4878 u64 val64; 4879 4880 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE | 4881 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) | 4882 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry); 4883 4884 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4885 &bar0->rts_rth_spdm_mem_ctrl); 4886 4887 /* poll until done */ 4888 if (__hal_device_register_poll(hldev, 4889 &bar0->rts_rth_spdm_mem_ctrl, 0, 4890 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE, 4891 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4892 4893 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 4894 } 4895 4896 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev, 4897 hldev->regh0, &bar0->rts_rth_spdm_mem_data); 4898 return XGE_HAL_OK; 4899 } 4900 4901 4902 /* 4903 * __hal_get_free_spdm_entry 4904 * @hldev: pointer to xge_hal_device_t structure 4905 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table. 4906 * 4907 * This function returns an index of unused spdm entry in the SPDM 4908 * table. 4909 */ 4910 static xge_hal_status_e 4911 __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) 4912 { 4913 xge_hal_status_e status; 4914 u64 spdm_line_val=0; 4915 4916 /* 4917 * Search in the local SPDM table for a free slot. 
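	 * A slot is considered free when its in_use flag is clear; the
	 * loop below stops at the first such slot.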
4918 */ 4919 *spdm_entry = 0; 4920 for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) { 4921 if (!hldev->spdm_table[*spdm_entry]->in_use) { 4922 break; 4923 } 4924 } 4925 4926 if (*spdm_entry >= hldev->spdm_max_entries) { 4927 return XGE_HAL_ERR_SPDM_TABLE_FULL; 4928 } 4929 4930 /* 4931 * Make sure that the corresponding spdm entry in the SPDM 4932 * table is free. 4933 * Seventh line of the spdm entry contains information about 4934 * whether the entry is free or not. 4935 */ 4936 if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry, 4937 &spdm_line_val)) != XGE_HAL_OK) { 4938 return status; 4939 } 4940 4941 /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */ 4942 if ((spdm_line_val & BIT(63))) { 4943 /* 4944 * Log a warning 4945 */ 4946 xge_debug_device(XGE_ERR, "Local SPDM table is not " 4947 "consistent with the actual one for the spdm " 4948 "entry %d\n", *spdm_entry); 4949 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; 4950 } 4951 4952 return XGE_HAL_OK; 4953 } 4954 4955 4956 4957 /** 4958 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table. 4959 * @devh: HAL device handle. 4960 * @src_ip: Source ip address(IPv4/IPv6). 4961 * @dst_ip: Destination ip address(IPv4/IPv6). 4962 * @l4_sp: L4 source port. 4963 * @l4_dp: L4 destination port. 4964 * @is_tcp: Set to 1, if the protocol is TCP. 4965 * 0, if the protocol is UDP. 4966 * @is_ipv4: Set to 1, if the protocol is IPv4. 4967 * 0, if the protocol is IPv6. 4968 * @tgt_queue: Target queue to route the receive packet. 4969 * 4970 * This function adds a new entry to the SPDM table. 4971 * 4972 * Returns: XGE_HAL_OK - success. 4973 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. 4974 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry within 4975 * the time(timeout). 4976 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full. 4977 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry. 4978 * 4979 * See also: xge_hal_spdm_entry_remove{}. 4980 */ 4981 xge_hal_status_e 4982 xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, 4983 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, 4984 u8 is_tcp, u8 is_ipv4, u8 tgt_queue) 4985 { 4986 4987 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 4988 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4989 u32 jhash_value; 4990 u32 jhash_init_val; 4991 u32 jhash_golden_ratio; 4992 u64 val64; 4993 int off; 4994 u16 spdm_entry; 4995 u8 msg[XGE_HAL_JHASH_MSG_LEN]; 4996 int ipaddr_len; 4997 xge_hal_status_e status; 4998 4999 5000 if (!hldev->config.rth_spdm_en) { 5001 return XGE_HAL_ERR_SPDM_NOT_ENABLED; 5002 } 5003 5004 if ((tgt_queue < XGE_HAL_MIN_RING_NUM) || 5005 (tgt_queue > XGE_HAL_MAX_RING_NUM)) { 5006 return XGE_HAL_ERR_SPDM_INVALID_ENTRY; 5007 } 5008 5009 5010 /* 5011 * Calculate the jenkins hash. 5012 */ 5013 /* 5014 * Create the Jenkins hash algorithm key. 5015 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to 5016 * use L4 information. Otherwise key = {L3SA, L3DA}. 5017 */ 5018 5019 if (is_ipv4) { 5020 ipaddr_len = 4; /* in bytes */ 5021 } else { 5022 ipaddr_len = 16; 5023 } 5024 5025 /* 5026 * Jenkins hash algorithm expects the key in the big endian 5027 * format. Since the key is a byte array, memcpy won't work in the 5028 * case of little endian. So, the current code extracts each 5029 * byte starting from the MSB and stores it in the key.
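	 * For instance, with IPv4 and rth_spdm_use_l4 set, the resulting
	 * key is 12 bytes, most significant byte first:
	 * { SA[0..3], DA[0..3], SP_hi, SP_lo, DP_hi, DP_lo }.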
5030 */ 5031 if (is_ipv4) { 5032 for (off = 0; off < ipaddr_len; off++) { 5033 u32 mask = vBIT32(0xff,(off*8),8); 5034 int shift = 32-(off+1)*8; 5035 msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift); 5036 msg[off+ipaddr_len] = 5037 (u8)((dst_ip->ipv4.addr & mask) >> shift); 5038 } 5039 } else { 5040 for (off = 0; off < ipaddr_len; off++) { 5041 int loc = off % 8; 5042 u64 mask = vBIT(0xff,(loc*8),8); 5043 int shift = 64-(loc+1)*8; 5044 5045 msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask) 5046 >> shift); 5047 msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8] 5048 & mask) >> shift); 5049 } 5050 } 5051 5052 off = (2*ipaddr_len); 5053 5054 if (hldev->config.rth_spdm_use_l4) { 5055 msg[off] = (u8)((l4_sp & 0xff00) >> 8); 5056 msg[off + 1] = (u8)(l4_sp & 0xff); 5057 msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8); 5058 msg[off + 3] = (u8)(l4_dp & 0xff); 5059 off += 4; 5060 } 5061 5062 /* 5063 * Calculate jenkins hash for this configuration 5064 */ 5065 val64 = xge_os_pio_mem_read64(hldev->pdev, 5066 hldev->regh0, 5067 &bar0->rts_rth_jhash_cfg); 5068 jhash_golden_ratio = (u32)(val64 >> 32); 5069 jhash_init_val = (u32)(val64 & 0xffffffff); 5070 5071 jhash_value = __hal_calc_jhash(msg, off, 5072 jhash_golden_ratio, 5073 jhash_init_val); 5074 5075 xge_os_spin_lock(&hldev->spdm_lock); 5076 5077 /* 5078 * Locate a free slot in the SPDM table. To avoid a seach in the 5079 * actual SPDM table, which is very expensive in terms of time, 5080 * we are maintaining a local copy of the table and the search for 5081 * the free entry is performed in the local table. 5082 */ 5083 if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry)) 5084 != XGE_HAL_OK) { 5085 xge_os_spin_unlock(&hldev->spdm_lock); 5086 return status; 5087 } 5088 5089 /* 5090 * Add this entry to the SPDM table 5091 */ 5092 status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp, 5093 is_tcp, is_ipv4, tgt_queue, 5094 jhash_value, /* calculated jhash */ 5095 spdm_entry); 5096 5097 xge_os_spin_unlock(&hldev->spdm_lock); 5098 5099 return status; 5100 } 5101 5102 /** 5103 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table. 5104 * @devh: HAL device handle. 5105 * @src_ip: Source ip address(IPv4/IPv6). 5106 * @dst_ip: Destination ip address(IPv4/IPv6). 5107 * @l4_sp: L4 source port. 5108 * @l4_dp: L4 destination port. 5109 * @is_tcp: Set to 1, if the protocol is TCP. 5110 * 0, if the protocol os UDP. 5111 * @is_ipv4: Set to 1, if the protocol is IPv4. 5112 * 0, if the protocol is IPv6. 5113 * 5114 * This function remove an entry from the SPDM table. 5115 * 5116 * Returns: XGE_HAL_OK - success. 5117 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. 5118 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in 5119 * the time(timeout). 5120 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM 5121 * table. 5122 * 5123 * See also: xge_hal_spdm_entry_add{}. 
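 *
 * A hedged add/remove pairing sketch (the addresses, ports and the target
 * ring are arbitrary placeholders):
 *
 *	xge_hal_ipaddr_t sip, dip;
 *	sip.ipv4.addr = 0xc0a80001;	... 192.168.0.1
 *	dip.ipv4.addr = 0xc0a80002;	... 192.168.0.2
 *	(void) xge_hal_spdm_entry_add(my_devh, &sip, &dip, 80, 1024,
 *	    1, 1, 3);	... is_tcp, is_ipv4, tgt_queue
 *	...
 *	(void) xge_hal_spdm_entry_remove(my_devh, &sip, &dip, 80, 1024, 1, 1);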
5124 */ 5125 xge_hal_status_e 5126 xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, 5127 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, 5128 u8 is_tcp, u8 is_ipv4) 5129 { 5130 5131 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5132 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5133 u64 val64; 5134 u16 spdm_entry; 5135 xge_hal_status_e status; 5136 u64 spdm_line_arr[8]; 5137 u8 line_no; 5138 u8 spdm_is_tcp; 5139 u8 spdm_is_ipv4; 5140 u16 spdm_l4_sp; 5141 u16 spdm_l4_dp; 5142 5143 if (!hldev->config.rth_spdm_en) { 5144 return XGE_HAL_ERR_SPDM_NOT_ENABLED; 5145 } 5146 5147 xge_os_spin_lock(&hldev->spdm_lock); 5148 5149 /* 5150 * Poll the rxpic_int_reg register until spdm ready bit is set or 5151 * timeout happens. 5152 */ 5153 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, 5154 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 5155 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 5156 5157 /* upper layer may require to repeat */ 5158 xge_os_spin_unlock(&hldev->spdm_lock); 5159 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 5160 } 5161 5162 /* 5163 * Clear the SPDM READY bit. 5164 */ 5165 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5166 &bar0->rxpic_int_reg); 5167 val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY; 5168 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 5169 &bar0->rxpic_int_reg); 5170 5171 /* 5172 * Search in the local SPDM table to get the index of the 5173 * corresponding entry in the SPDM table. 5174 */ 5175 spdm_entry = 0; 5176 for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) { 5177 if ((!hldev->spdm_table[spdm_entry]->in_use) || 5178 (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) || 5179 (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) || 5180 (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) || 5181 (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) { 5182 continue; 5183 } 5184 5185 /* 5186 * Compare the src/dst IP addresses of source and target 5187 */ 5188 if (is_ipv4) { 5189 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr 5190 != src_ip->ipv4.addr) || 5191 (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr 5192 != dst_ip->ipv4.addr)) { 5193 continue; 5194 } 5195 } else { 5196 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0] 5197 != src_ip->ipv6.addr[0]) || 5198 (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1] 5199 != src_ip->ipv6.addr[1]) || 5200 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0] 5201 != dst_ip->ipv6.addr[0]) || 5202 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1] 5203 != dst_ip->ipv6.addr[1])) { 5204 continue; 5205 } 5206 } 5207 break; 5208 } 5209 5210 if (spdm_entry >= hldev->spdm_max_entries) { 5211 xge_os_spin_unlock(&hldev->spdm_lock); 5212 return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND; 5213 } 5214 5215 /* 5216 * Retrieve the corresponding entry from the SPDM table and 5217 * make sure that the data is consistent. 5218 */ 5219 for(line_no = 0; line_no < 8; line_no++) { 5220 5221 /* 5222 * SPDM line 2,3,4 are valid only for IPv6 entry. 5223 * SPDM line 5 & 6 are reserved. We don't have to 5224 * read these entries in the above cases. 
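		 * As used below: line 0 carries the L4 ports and the
		 * is_tcp/is_ipv4 flags, line 1 carries the IPv4 source and
		 * destination addresses, lines 1-2 and 3-4 carry the IPv6
		 * source and destination respectively, and line 7 carries
		 * the entry_enable bit (BIT(63)).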
5225 */ 5226 if (((is_ipv4) && 5227 ((line_no == 2)||(line_no == 3)||(line_no == 4))) || 5228 (line_no == 5) || 5229 (line_no == 6)) { 5230 continue; 5231 } 5232 5233 if ((status = __hal_read_spdm_entry_line( 5234 hldev, 5235 line_no, 5236 spdm_entry, 5237 &spdm_line_arr[line_no])) 5238 != XGE_HAL_OK) { 5239 xge_os_spin_unlock(&hldev->spdm_lock); 5240 return status; 5241 } 5242 } 5243 5244 /* 5245 * Seventh line of the spdm entry contains the entry_enable 5246 * bit. Make sure that the entry_enable bit of this spdm entry 5247 * is set. 5248 * To remove an entry from the SPDM table, reset this 5249 * bit. 5250 */ 5251 if (!(spdm_line_arr[7] & BIT(63))) { 5252 /* 5253 * Log a warning 5254 */ 5255 xge_debug_device(XGE_ERR, "Local SPDM table is not " 5256 "consistent with the actual one for the spdm " 5257 "entry %d \n", spdm_entry); 5258 goto err_exit; 5259 } 5260 5261 /* 5262 * Retreive the L4 SP/DP, src/dst ip addresses from the SPDM 5263 * table and do a comparision. 5264 */ 5265 spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4); 5266 spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63)); 5267 spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48); 5268 spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff); 5269 5270 5271 if ((spdm_is_tcp != is_tcp) || 5272 (spdm_is_ipv4 != is_ipv4) || 5273 (spdm_l4_sp != l4_sp) || 5274 (spdm_l4_dp != l4_dp)) { 5275 /* 5276 * Log a warning 5277 */ 5278 xge_debug_device(XGE_ERR, "Local SPDM table is not " 5279 "consistent with the actual one for the spdm " 5280 "entry %d \n", spdm_entry); 5281 goto err_exit; 5282 } 5283 5284 if (is_ipv4) { 5285 /* Upper 32 bits of spdm_line(64 bit) contains the 5286 * src IPv4 address. Lower 32 bits of spdm_line 5287 * contains the destination IPv4 address. 5288 */ 5289 u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32); 5290 u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff); 5291 5292 if ((temp_src_ip != src_ip->ipv4.addr) || 5293 (temp_dst_ip != dst_ip->ipv4.addr)) { 5294 xge_debug_device(XGE_ERR, "Local SPDM table is not " 5295 "consistent with the actual one for the spdm " 5296 "entry %d \n", spdm_entry); 5297 goto err_exit; 5298 } 5299 5300 } else { 5301 /* 5302 * SPDM line 1 & 2 contains the src IPv6 address. 5303 * SPDM line 3 & 4 contains the dst IPv6 address. 5304 */ 5305 if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) || 5306 (spdm_line_arr[2] != src_ip->ipv6.addr[1]) || 5307 (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) || 5308 (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) { 5309 5310 /* 5311 * Log a warning 5312 */ 5313 xge_debug_device(XGE_ERR, "Local SPDM table is not " 5314 "consistent with the actual one for the spdm " 5315 "entry %d \n", spdm_entry); 5316 goto err_exit; 5317 } 5318 } 5319 5320 /* 5321 * Reset the entry_enable bit to zero 5322 */ 5323 spdm_line_arr[7] &= ~BIT(63); 5324 5325 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 5326 spdm_line_arr[7], 5327 (void *)((char *)hldev->spdm_mem_base + 5328 (spdm_entry * 64) + (7 * 8))); 5329 5330 /* 5331 * Wait for the operation to be completed. 5332 */ 5333 if (__hal_device_register_poll(hldev, 5334 &bar0->rxpic_int_reg, 1, 5335 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 5336 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 5337 xge_os_spin_unlock(&hldev->spdm_lock); 5338 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 5339 } 5340 5341 /* 5342 * Make the corresponding spdm entry in the local SPDM table 5343 * available for future use. 
5344 */ 5345 hldev->spdm_table[spdm_entry]->in_use = 0; 5346 xge_os_spin_unlock(&hldev->spdm_lock); 5347 5348 return XGE_HAL_OK; 5349 5350 err_exit: 5351 xge_os_spin_unlock(&hldev->spdm_lock); 5352 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; 5353 } 5354 5355 /* 5356 * __hal_calc_jhash - Calculate Jenkins hash. 5357 * @msg: Jenkins hash algorithm key. 5358 * @length: Length of the key. 5359 * @golden_ratio: Jenkins hash golden ratio. 5360 * @init_value: Jenkins hash initial value. 5361 * 5362 * This function implements the Jenkins based algorithm used for the 5363 * calculation of the RTH hash. 5364 * Returns: Jenkins hash value. 5365 * 5366 */ 5367 u32 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) 5368 { 5369 5370 register u32 a,b,c,len; 5371 5372 /* 5373 * Set up the internal state 5374 */ 5375 len = length; 5376 a = b = golden_ratio; /* the golden ratio; an arbitrary value */ 5377 c = init_value; /* the previous hash value */ 5378 5379 /* handle most of the key */ 5380 while (len >= 12) 5381 { 5382 a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16) 5383 + ((u32)msg[3]<<24)); 5384 b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16) 5385 + ((u32)msg[7]<<24)); 5386 c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16) 5387 + ((u32)msg[11]<<24)); 5388 mix(a,b,c); 5389 msg += 12; len -= 12; 5390 } 5391 5392 /* handle the last 11 bytes */ 5393 c += length; 5394 switch(len) /* all the case statements fall through */ 5395 { 5396 case 11: c+= ((u32)msg[10]<<24); 5397 break; 5398 case 10: c+= ((u32)msg[9]<<16); 5399 break; 5400 case 9 : c+= ((u32)msg[8]<<8); 5401 break; 5402 /* the first byte of c is reserved for the length */ 5403 case 8 : b+= ((u32)msg[7]<<24); 5404 break; 5405 case 7 : b+= ((u32)msg[6]<<16); 5406 break; 5407 case 6 : b+= ((u32)msg[5]<<8); 5408 break; 5409 case 5 : b+= msg[4]; 5410 break; 5411 case 4 : a+= ((u32)msg[3]<<24); 5412 break; 5413 case 3 : a+= ((u32)msg[2]<<16); 5414 break; 5415 case 2 : a+= ((u32)msg[1]<<8); 5416 break; 5417 case 1 : a+= msg[0]; 5418 break; 5419 /* case 0: nothing left to add */ 5420 } 5421 5422 mix(a,b,c); 5423 5424 /* report the result */ 5425 return c; 5426 } 5427 5428 #if defined(XGE_HAL_MSI) | defined(XGE_HAL_MSI_X) 5429 /* 5430 * __hal_device_rti_set 5431 * @ring: The post_qid of the ring. 5432 * @channel: HAL channel of the ring. 5433 * 5434 * This function stores the RTI value associated for the MSI and 5435 * also unmasks this particular RTI in the rti_mask register. 5436 */ 5437 static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel) 5438 { 5439 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 5440 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 5441 u64 val64; 5442 5443 #if defined(XGE_HAL_MSI) 5444 channel->rti = (u8)ring_qid; 5445 #endif 5446 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5447 &bar0->rx_traffic_mask); 5448 val64 &= ~BIT(ring_qid); 5449 xge_os_pio_mem_write64(hldev->pdev, 5450 hldev->regh0, val64, 5451 &bar0->rx_traffic_mask); 5452 } 5453 5454 /* 5455 * __hal_device_tti_set 5456 * @ring: The post_qid of the FIFO. 5457 * @channel: HAL channel the FIFO. 5458 * 5459 * This function stores the TTI value associated for the MSI and 5460 * also unmasks this particular TTI in the tti_mask register. 
5461 */ 5462 static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel) 5463 { 5464 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 5465 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 5466 u64 val64; 5467 5468 #if defined(XGE_HAL_MSI) 5469 channel->tti = (u8)fifo_qid; 5470 #endif 5471 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5472 &bar0->tx_traffic_mask); 5473 val64 &= ~BIT(fifo_qid); 5474 xge_os_pio_mem_write64(hldev->pdev, 5475 hldev->regh0, val64, 5476 &bar0->tx_traffic_mask); 5477 } 5478 #endif 5479 5480 #if defined(XGE_HAL_MSI) 5481 /** 5482 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a 5483 * FIFO for a given MSI. 5484 * @channelh: HAL channel handle. 5485 * @msi: MSI Number associated with the channel. 5486 * @msi_msg: The MSI message associated with the MSI number above. 5487 * 5488 * This API will associate a given channel (either Ring or FIFO) with the 5489 * given MSI number. It will alo program the Tx_Mat/Rx_Mat tables in the 5490 * hardware to indicate this association to the hardware. 5491 */ 5492 xge_hal_status_e 5493 xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg) 5494 { 5495 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 5496 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 5497 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 5498 u64 val64; 5499 5500 channel->msi_msg = msi_msg; 5501 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 5502 int ring = channel->post_qid; 5503 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d," 5504 " MSI: %d\n", channel->msi_msg, ring, msi); 5505 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5506 &bar0->rx_mat); 5507 val64 |= XGE_HAL_SET_RX_MAT(ring, msi); 5508 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 5509 &bar0->rx_mat); 5510 __hal_device_rti_set(ring, channel); 5511 } else { 5512 int fifo = channel->post_qid; 5513 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d," 5514 " MSI: %d\n", channel->msi_msg, fifo, msi); 5515 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5516 &bar0->tx_mat[0]); 5517 val64 |= XGE_HAL_SET_TX_MAT(fifo, msi); 5518 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 5519 &bar0->tx_mat[0]); 5520 __hal_device_tti_set(fifo, channel); 5521 } 5522 5523 return XGE_HAL_OK; 5524 } 5525 #endif 5526 #if defined(XGE_HAL_MSI_X) 5527 /* 5528 * __hal_set_xmsi_vals 5529 * @devh: HAL device handle. 5530 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address. 5531 * Filled in by this function. 5532 * @msix_address: 32bit MSI-X DMA address. 5533 * Filled in by this function. 5534 * @msix_idx: index that corresponds to the (@msix_value, @msix_address) 5535 * entry in the table of MSI-X (value, address) pairs. 5536 * 5537 * This function will program the hardware associating the given 5538 * address/value cobination to the specified msi number. 
5539  */
5540 static void __hal_set_xmsi_vals (xge_hal_device_h devh,
5541                  u32 *msix_value,
5542                  u64 *msix_addr,
5543                  int msix_idx)
5544 {
5545     int cnt = 0;
5546 
5547     xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5548     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
5549     u64 val64;
5550 
5551     val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
5552     __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
5553             (u32)(val64 >> 32), &bar0->xmsi_access);
5554     __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
5555             (u32)(val64), &bar0->xmsi_access);
5556     do {
5557         val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5558                 &bar0->xmsi_access);
5559         if (val64 & XGE_HAL_XMSI_STROBE)
5560             break;
5561         cnt++;
5562         xge_os_mdelay(20);
5563     } while(cnt < 5);
5564     *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5565             &bar0->xmsi_data));
5566     *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5567             &bar0->xmsi_address);
5568 }
5569 
5570 /**
5571  * xge_hal_channel_msix_set - Associate MSI-X with a channel.
5572  * @channelh: HAL channel handle.
5573  * @msix_idx: index that corresponds to a particular (@msix_value,
5574  *            @msix_address) entry in the MSI-X table.
5575  *
5576  * This API associates a given channel (either Ring or FIFO) with the
5577  * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
5578  * to indicate this association.
5579  */
5580 xge_hal_status_e
5581 xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
5582 {
5583     xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
5584     xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
5585     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
5586     u64 val64;
5587     u16 msi_control_reg;
5588 
5589     if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
5590         /* Currently Ring and RTI are mapped one-to-one. */
5591         int ring = channel->post_qid;
5592         val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5593                 &bar0->rx_mat);
5594         val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
5595         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5596                 &bar0->rx_mat);
5597         __hal_device_rti_set(ring, channel);
5598     } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
5599         int fifo = channel->post_qid;
5600         val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5601                 &bar0->tx_mat[0]);
5602         val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
5603         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5604                 &bar0->tx_mat[0]);
5605         __hal_device_tti_set(fifo, channel);
5606     }
5607     channel->msix_idx = msix_idx;
5608     __hal_set_xmsi_vals(hldev, &channel->msix_data,
5609                 &channel->msix_address,
5610                 channel->msix_idx);
5611 
5612     /*
5613      * To enable MSI-X, MSI also needs to be enabled, due to a bug
5614      * in the herc NIC. (Temp change, needs to be removed later)
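     * Concretely, the code below sets the MSI Enable bit in the PCI
     * msi_control word and clears this vector's bit in xmsi_mask_reg
     * so that the MSI-X interrupt can be raised.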
5615      */
5616     xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5617         xge_offsetof(xge_hal_pci_config_le_t, msi_control), &msi_control_reg);
5618 
5619     msi_control_reg |= 0x1; /* Enable MSI */
5620 
5621     xge_os_pci_write16(hldev->pdev, hldev->cfgh,
5622         xge_offsetof(xge_hal_pci_config_le_t, msi_control), msi_control_reg);
5623 
5624 
5625     /* Enable the MSI-X interrupt */
5626     {
5627         val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5628                 &bar0->xmsi_mask_reg);
5629         val64 &= ~(1LL << (63 - msix_idx));
5630         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5631                 &bar0->xmsi_mask_reg);
5632     }
5633 
5634     return XGE_HAL_OK;
5635 }
5636 #endif
5637 
5638 #if defined(XGE_HAL_CONFIG_LRO)
5639 /**
5640  * xge_hal_lro_terminate - Terminate LRO resources.
5641  * @lro_scale: Amount of LRO memory.
5642  * @hldev: HAL device structure.
5643  *
5644  */
5645 void
5646 xge_hal_lro_terminate(u32 lro_scale,
5647             xge_hal_device_t *hldev)
5648 {
5649 }
5650 
5651 /**
5652  * xge_hal_lro_init - Initialize LRO resources.
5653  * @lro_scale: Amount of LRO memory.
5654  * @hldev: HAL device structure.
5655  * Note: for the time being only one LRO is used per device; the size
5656  * will be increased later on.
5657  */
5658 xge_hal_status_e
5659 xge_hal_lro_init(u32 lro_scale,
5660         xge_hal_device_t *hldev)
5661 {
5662     int i;
5663     for(i = 0; i < XGE_HAL_MAX_LRO_SESSIONS; i++)
5664         hldev->g_lro_pool[i].in_use = 0;
5665 
5666     return XGE_HAL_OK;
5667 }
5668 #endif
5669 
5670 
5671 /**
5672  * xge_hal_device_poll - HAL device "polling" entry point.
5673  * @devh: HAL device handle.
5674  *
5675  * HAL "polling" entry point. Note that this is part of the HAL public API.
5676  * The upper-layer driver _must_ periodically poll HAL via
5677  * xge_hal_device_poll().
5678  *
5679  * HAL uses the caller's execution context to serially process accumulated
5680  * slow-path events, such as link state changes and hardware error
5681  * indications.
5682  *
5683  * The rate of polling could be somewhere between 500us and 10ms,
5684  * depending on requirements (e.g., the requirement to support fail-over
5685  * could mean that a 500us or even 100us polling interval needs to be used).
5686  *
5687  * The need and motivation for external polling include:
5688  *
5689  * - removing the error-checking "burden" from the HAL interrupt handler
5690  *   (see xge_hal_device_handle_irq());
5691  *
5692  * - removing a potential source of portability issues by _not_
5693  *   implementing a separate polling thread within HAL itself.
5694  *
5695  * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
5696  * Usage: See ex_slow_path{}.
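 *
 * A minimal ULD-side polling sketch (the callback name, the xge_lldev_t
 * layout and the xge_uld_timer_restart() helper with its 1000 microsecond
 * reschedule are illustrative assumptions, not part of the HAL API):
 *
 *	static void xge_uld_poll_timer(void *arg)
 *	{
 *		xge_lldev_t *lldev = (xge_lldev_t *)arg;
 *
 *		xge_hal_device_poll(lldev->devh);
 *		xge_uld_timer_restart(lldev, 1000);
 *	}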
5697  */
5698 void
5699 xge_hal_device_poll(xge_hal_device_h devh)
5700 {
5701     unsigned char item_buf[sizeof(xge_queue_item_t) +
5702                 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
5703     xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
5704     xge_queue_status_e qstatus;
5705     xge_hal_status_e hstatus;
5706     int i = 0;
5707     int queue_has_critical_event = 0;
5708     xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5709 
5710 _again:
5711     if (!hldev->is_initialized ||
5712         hldev->terminating ||
5713         hldev->magic != XGE_HAL_MAGIC)
5714         return;
5715 
5716     if (!queue_has_critical_event)
5717         queue_has_critical_event =
5718             __queue_get_reset_critical(hldev->queueh);
5719 
5720     hldev->in_poll = 1;
5721     while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
5722 
5723         qstatus = xge_queue_consume(hldev->queueh,
5724                     XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
5725                     item);
5726         if (qstatus == XGE_QUEUE_IS_EMPTY)
5727             break;
5728 
5729         xge_debug_queue(XGE_TRACE,
5730              "queueh 0x%llx consumed event: %d ctxt 0x%llx",
5731              (u64)(ulong_t)hldev->queueh, item->event_type,
5732              (u64)(ulong_t)item->context);
5733 
5734         if (!hldev->is_initialized ||
5735             hldev->magic != XGE_HAL_MAGIC) {
5736             hldev->in_poll = 0;
5737             return;
5738         }
5739 
5740         switch (item->event_type) {
5741         case XGE_HAL_EVENT_LINK_IS_UP: {
5742             if (!queue_has_critical_event &&
5743                 g_xge_hal_driver->uld_callbacks.link_up) {
5744                 g_xge_hal_driver->uld_callbacks.link_up(
5745                         hldev->upper_layer_info);
5746                 hldev->link_state = XGE_HAL_LINK_UP;
5747             }
5748         } break;
5749         case XGE_HAL_EVENT_LINK_IS_DOWN: {
5750             if (!queue_has_critical_event &&
5751                 g_xge_hal_driver->uld_callbacks.link_down) {
5752                 g_xge_hal_driver->uld_callbacks.link_down(
5753                         hldev->upper_layer_info);
5754                 hldev->link_state = XGE_HAL_LINK_DOWN;
5755             }
5756         } break;
5757         case XGE_HAL_EVENT_SERR:
5758         case XGE_HAL_EVENT_ECCERR:
5759         case XGE_HAL_EVENT_PARITYERR:
5760         case XGE_HAL_EVENT_TARGETABORT:
5761         case XGE_HAL_EVENT_SLOT_FREEZE: {
5762             void *item_data = xge_queue_item_data(item);
5763             int event_type = item->event_type;
5764             u64 val64 = *((u64*)item_data);
5765 
5766             if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
5767                 if (xge_hal_device_is_slot_freeze(hldev))
5768                     event_type = XGE_HAL_EVENT_SLOT_FREEZE;
5769             if (g_xge_hal_driver->uld_callbacks.crit_err) {
5770                 g_xge_hal_driver->uld_callbacks.crit_err(
5771                     hldev->upper_layer_info,
5772                     event_type,
5773                     val64);
5774                 /* handle one critical event per poll cycle */
5775                 hldev->in_poll = 0;
5776                 return;
5777             }
5778         } break;
5779         default: {
5780             xge_debug_queue(XGE_TRACE,
5781                 "got non-HAL event %d",
5782                 item->event_type);
5783         } break;
5784         }
5785 
5786         /* broadcast this event */
5787         if (g_xge_hal_driver->uld_callbacks.event)
5788             g_xge_hal_driver->uld_callbacks.event(item);
5789     }
5790 
5791     if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
5792         if (g_xge_hal_driver->uld_callbacks.before_device_poll(
5793                  hldev) != 0) {
5794             hldev->in_poll = 0;
5795             return;
5796         }
5797     }
5798 
5799     hstatus = __hal_device_poll(hldev);
5800     if (g_xge_hal_driver->uld_callbacks.after_device_poll)
5801         g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);
5802 
5803     /*
5804      * handle critical error right away:
5805      * - walk the device queue again
5806      * - drop non-critical events, if any
5807      * - look for the 1st critical
5808      */
5809     if (hstatus == XGE_HAL_ERR_CRITICAL) {
5810         queue_has_critical_event = 1;
5811         goto _again;
5812     }
5813 
5814     hldev->in_poll = 0;
5815 }
5816 
5817 /**
5818  * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
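 * (RTH spreads received frames across the configured rings using a
 * Jenkins hash of selected packet header fields; see __hal_calc_jhash()
 * above.)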
5819  * @hldev: HAL device handle.
5820  *
5821  * This function is used to set the adapter to enhanced mode.
5822  *
5823  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
5824  */
5825 void
5826 xge_hal_rts_rth_init(xge_hal_device_t *hldev)
5827 {
5828     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5829     u64 val64;
5830 
5831     /*
5832      * Set the receive traffic steering mode from default (classic)
5833      * to enhanced.
5834      */
5835     val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5836                       &bar0->rts_ctrl);
5837     val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
5838     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5839                    val64, &bar0->rts_ctrl);
5840 }
5841 
5842 /**
5843  * xge_hal_rts_rth_clr - Clear RTS hashing.
5844  * @hldev: HAL device handle.
5845  *
5846  * This function is used to clear all RTS hashing state.
5847  * It brings the adapter out of enhanced mode back to classic mode.
5848  * It also clears the RTS_RTH_CFG register, i.e. the hash type, hash
5849  * function, etc.
5850  *
5851  * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
5852  */
5853 void
5854 xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
5855 {
5856     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5857     u64 val64;
5858 
5859     /*
5860      * Set the receive traffic steering mode back from enhanced to
5861      * the default (classic) mode.
5862      */
5863     val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5864                       &bar0->rts_ctrl);
5865     val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
5866     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5867                    val64, &bar0->rts_ctrl);
5868     val64 = 0;
5869     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5870                    &bar0->rts_rth_cfg);
5871 }
5872 
5873 /**
5874  * xge_hal_rts_rth_set - Set/configure RTS hashing.
5875  * @hldev: HAL device handle.
5876  * @def_q: default queue
5877  * @hash_type: hash type, e.g. TcpIpV4, TcpIpV6, etc.
5878  * @bucket_size: number of least significant bits to be used for hashing.
5879  *
5880  * Used to set/configure RTS hashing:
5881  * - set the default queue;
5882  * - set the hash function (algorithm selection);
5883  * - enable RTH and set the bucket size.
5884  *
5885  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
5886  */
5887 void
5888 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
5889             u16 bucket_size)
5890 {
5891     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5892     u64 val64;
5893 
5894     val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
5895     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5896                    &bar0->rts_default_q);
5897 
5898     val64 = hash_type;
5899     val64 |= XGE_HAL_RTS_RTH_EN;
5900     val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
5901     val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
5902     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5903                    &bar0->rts_rth_cfg);
5904 }
5905 
5906 /**
5907  * xge_hal_rts_rth_start - Start RTS hashing.
5908  * @hldev: HAL device handle.
5909  *
5910  * Used to start RTS hashing.
5911  *
5912  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_stop().
5913  */
5914 void
5915 xge_hal_rts_rth_start(xge_hal_device_t *hldev)
5916 {
5917     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5918     u64 val64;
5919 
5920 
5921     val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5922                       &bar0->rts_rth_cfg);
5923     val64 |= XGE_HAL_RTS_RTH_EN;
5924     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5925                    &bar0->rts_rth_cfg);
5926 }
5927 
5928 /**
5929  * xge_hal_rts_rth_stop - Stop RTS hashing.
5930  * @hldev: HAL device handle.
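 *
 * Only the XGE_HAL_RTS_RTH_EN bit is cleared; the rest of the
 * RTS_RTH_CFG register is left untouched, so hashing can be re-enabled
 * later via xge_hal_rts_rth_start().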
5931  *
5932  * Used to stop RTS hashing.
5933  *
5934  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start().
5935  */
5936 void
5937 xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
5938 {
5939     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5940     u64 val64;
5941 
5942     val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5943                       &bar0->rts_rth_cfg);
5944     val64 &= ~XGE_HAL_RTS_RTH_EN;
5945     xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5946                    &bar0->rts_rth_cfg);
5947 }
5948 
5949 /**
5950  * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
5951  * @hldev: HAL device handle.
5952  * @itable: Pointer to the indirection table
5953  * @itable_size: number of entries in the indirection table
5954  *
5955  * Used to set/configure the indirection table.
5956  * It enables the required number of entries in the IT and programs
5957  * each entry with the corresponding value from @itable.
5958  *
5959  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
5960  */
5961 xge_hal_status_e
5962 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
5963 {
5964     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5965     u64 val64;
5966     u32 idx;
5967 
5968     for (idx = 0; idx < itable_size; idx++) {
5969         val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
5970             XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
5971 
5972         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5973                        &bar0->rts_rth_map_mem_data);
5974 
5975         /* execute */
5976         val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
5977              XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
5978              XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
5979         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5980                        &bar0->rts_rth_map_mem_ctrl);
5981 
5982         /* poll until done */
5983         if (__hal_device_register_poll(hldev,
5984                 &bar0->rts_rth_map_mem_ctrl, 0,
5985                 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
5986                 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
5987             /* upper layer may need to repeat */
5988             return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5989         }
5990     }
5991 
5992     return XGE_HAL_OK;
5993 }
5994 
5995 
5996 /**
5997  * xge_hal_device_rts_rth_key_set - Configure the 40-byte secret for hash calculation.
5998  *
5999  * @hldev: HAL device handle.
6000  * @KeySize: Number of 64-bit words in @Key (5 for the full 40-byte key)
6001  * @Key: up to 40-byte array of 8-bit values
6002  * This function configures the 40-byte secret which is used for hash
6003  * calculation.
6004  *
6005  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
6006  */
6007 void
6008 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
6009 {
6010     xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
6011     u64 val64;
6012     u32 entry, nreg, i;
6013 
6014     entry = 0;
6015     nreg = 0;
6016 
6017     while( KeySize ) {
6018         val64 = 0;
6019         for ( i = 0; i < 8 ; i++) {
6020             /* Prepare 64-bit word for 'nreg' containing 8 key bytes. */
6021             if (i)
6022                 val64 <<= 8;
6023             val64 |= Key[entry++];
6024         }
6025 
6026         KeySize--;
6027 
6028         /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
6029         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6030                        &bar0->rts_rth_hash_mask[nreg++]);
6031     }
6032 
6033     while( nreg < 5 ) {
6034         /* Clear the rest if key is less than 40 bytes */
6035         val64 = 0;
6036         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6037                        &bar0->rts_rth_hash_mask[nreg++]);
6038     }
6039 }
6040 
6041 
6042 /**
6043  * xge_hal_device_is_closed - Check whether the device is closed.
6044  *
6045  * @devh: HAL device handle.
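 *
 * Returns 1 once all FIFO and ring channels of the device have been
 * closed (i.e. both channel lists are empty), 0 otherwise.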
6046  */
6047 int
6048 xge_hal_device_is_closed(xge_hal_device_h devh)
6049 {
6050     xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6051 
6052     if (xge_list_is_empty(&hldev->fifo_channels) &&
6053         xge_list_is_empty(&hldev->ring_channels))
6054         return 1;
6055 
6056     return 0;
6057 }
6058 
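
/*
 * A minimal RTH bring-up sketch for the APIs above. The helper name,
 * the two-ring/64-entry indirection table, the all-zero key and the
 * caller-supplied hash_type (one of the TcpIpV4/TcpIpV6 enable flags
 * defined in the HAL headers) are illustrative assumptions, not
 * requirements of the HAL:
 *
 *	static void xge_uld_rth_setup(xge_hal_device_t *hldev, u64 hash_type)
 *	{
 *		u8 itable[64];
 *		u8 key[40] = {0};
 *		u32 i;
 *
 *		for (i = 0; i < 64; i++)
 *			itable[i] = i % 2;
 *
 *		xge_hal_rts_rth_init(hldev);
 *		xge_hal_rts_rth_set(hldev, 0, hash_type, 6);
 *		(void) xge_hal_rts_rth_itable_set(hldev, itable, 64);
 *		xge_hal_device_rts_rth_key_set(hldev, 5, key);
 *		xge_hal_rts_rth_start(hldev);
 *	}
 */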