// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2018 Solarflare Communications Inc.
 * Copyright 2019-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "io.h"
#include "ef100_nic.h"
#include "ef100_netdev.h"
#include "ef100_sriov.h"
#include "ef100_regs.h"
#include "ef100.h"

#define EFX_EF100_PCI_DEFAULT_BAR	2

/* Number of bytes at start of vendor specified extended capability that indicate
 * that the capability is vendor specified. i.e. offset from value returned by
 * pci_find_next_ext_capability() to beginning of vendor specified capability
 * header.
 */
#define PCI_EXT_CAP_HDR_LENGTH	4

/* Expected size of a Xilinx continuation address table entry. */
#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH	16

struct ef100_func_ctl_window {
	bool valid;
	unsigned int bar;
	u64 offset;
};

static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
				       struct ef100_func_ctl_window *result);

/* Number of bytes to offset when reading bit position x with dword accessors. */
#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3)

#define EXTRACT_BITS(x, lbn, width) \
	(((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1))

static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx,
					      int structure_start,
					      int lbn, int width)
{
	efx_dword_t dword;

	efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn));

	return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width);
}

#define ef100_pci_get_bar_bits(efx, entry_location, bitdef) \
	_ef100_pci_get_bar_bits_with_width(efx, entry_location, \
					   ESF_GZ_CFGBAR_ ## bitdef ## _LBN, \
					   ESF_GZ_CFGBAR_ ## bitdef ## _WIDTH)
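/* Usage sketch (illustrative only, FOO is a made-up field): given a field
 * defined as ESF_GZ_CFGBAR_FOO_LBN 68 and ESF_GZ_CFGBAR_FOO_WIDTH 4,
 * ef100_pci_get_bar_bits(efx, entry, FOO) reads the dword at byte offset
 * entry + ROUND_DOWN_TO_DWORD(68) = entry + 8, then EXTRACT_BITS(x, 68, 4)
 * shifts it right by (68 & 31) = 4 and masks with ((1ull << 4) - 1) = 0xf
 * to recover the 4-bit field value.
 */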
static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location,
				       struct ef100_func_ctl_window *result)
{
	u64 offset = ef100_pci_get_bar_bits(efx, entry_location, EF100_FUNC_CTL_WIN_OFF) <<
		ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
	u32 bar = ef100_pci_get_bar_bits(efx, entry_location, EF100_BAR);

	netif_dbg(efx, probe, efx->net_dev,
		  "Found EF100 function control window bar=%d offset=0x%llx\n",
		  bar, offset);

	if (result->valid) {
		netif_err(efx, probe, efx->net_dev,
			  "Duplicated EF100 table entry.\n");
		return -EINVAL;
	}

	if (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n",
			  bar);
		return -EINVAL;
	}

	result->bar = bar;
	result->offset = offset;
	result->valid = true;
	return 0;
}

static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar,
					u64 next_entry)
{
	return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE >
	       pci_resource_len(efx->pci_dev, bar);
}

/* Parse a Xilinx capabilities table entry describing a continuation to a new
 * sub-table.
 */
static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location,
					  struct ef100_func_ctl_window *result)
{
	unsigned int previous_bar;
	efx_oword_t entry;
	u64 offset;
	int rc = 0;
	u32 bar;

	efx_reado(efx, &entry, entry_location);

	bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR);

	offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) <<
		ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT;

	previous_bar = efx->mem_bar;

	if (bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_VSEC_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
			  bar);
		return -EINVAL;
	}

	if (bar != previous_bar) {
		efx_fini_io(efx);

		if (ef100_pci_does_bar_overflow(efx, bar, offset)) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
				  bar, offset);
			return -EINVAL;
		}

		/* Temporarily map new BAR. */
		rc = efx_init_io(efx, bar,
				 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
				 pci_resource_len(efx->pci_dev, bar));
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Mapping new BAR for Xilinx table failed, rc=%d\n", rc);
			return rc;
		}
	}

	rc = ef100_pci_walk_xilinx_table(efx, offset, result);
	if (rc)
		return rc;

	if (bar != previous_bar) {
		efx_fini_io(efx);

		/* Put old BAR back. */
		rc = efx_init_io(efx, previous_bar,
				 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
				 pci_resource_len(efx->pci_dev, previous_bar));
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Putting old BAR back failed, rc=%d\n", rc);
			return rc;
		}
	}

	return 0;
}

/* Iterate over the Xilinx capabilities table in the currently mapped BAR and
 * call ef100_pci_parse_ef100_entry() on any EF100 entries and
 * ef100_pci_parse_continue_entry() on any table continuations.
 */
static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
				       struct ef100_func_ctl_window *result)
{
	u64 current_entry = offset;
	int rc = 0;

	while (true) {
		u32 id = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_FORMAT);
		u32 last = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_LAST);
		u32 rev = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_REV);
		u32 entry_size;

		if (id == ESE_GZ_CFGBAR_ENTRY_LAST)
			return 0;

		entry_size = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_SIZE);

		netif_dbg(efx, probe, efx->net_dev,
			  "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n",
			  id, entry_size, current_entry, efx->mem_bar);

		if (entry_size < sizeof(u32) * 2) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table entry too short len=0x%x\n", entry_size);
			return -EINVAL;
		}

		switch (id) {
		case ESE_GZ_CFGBAR_ENTRY_EF100:
			if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
			    entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
				netif_err(efx, probe, efx->net_dev,
					  "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
					  entry_size, rev);
				return -EINVAL;
			}

			rc = ef100_pci_parse_ef100_entry(efx, current_entry,
							 result);
			if (rc)
				return rc;
			break;
		case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR:
			if (rev != 0 || entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH) {
				netif_err(efx, probe, efx->net_dev,
					  "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
					  entry_size, rev);
				return -EINVAL;
			}

			rc = ef100_pci_parse_continue_entry(efx, current_entry, result);
			if (rc)
				return rc;
			break;
		default:
			/* Ignore unknown table entries. */
			break;
		}

		if (last)
			return 0;

		current_entry += entry_size;

		if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table overrun at position=0x%llx.\n",
				  current_entry);
			return -EINVAL;
		}
	}
}
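/* Config-space analogue of _ef100_pci_get_bar_bits_with_width(): read the
 * dword containing bit position lbn from PCI config space with
 * pci_read_config_dword() and extract width bits.  Unlike the BAR accessor
 * above, the read can fail, so the field value is returned through *result
 * and the return value is an error code.
 */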
static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx,
						 int structure_start, int lbn,
						 int width, u32 *result)
{
	int rc, pos = structure_start + ROUND_DOWN_TO_DWORD(lbn);
	u32 temp;

	rc = pci_read_config_dword(efx->pci_dev, pos, &temp);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read PCI config dword at %d\n",
			  pos);
		return rc;
	}

	*result = EXTRACT_BITS(temp, lbn, width);

	return 0;
}

#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result) \
	_ef100_pci_get_config_bits_with_width(efx, entry_location, \
					      ESF_GZ_VSEC_ ## bitdef ## _LBN, \
					      ESF_GZ_VSEC_ ## bitdef ## _WIDTH, result)

/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed
 * to by this PCI_EXT_CAP_ID_VNDR.
 */
static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap,
				      bool has_offset_hi,
				      struct ef100_func_ctl_window *result)
{
	u32 offset_high = 0;
	u32 offset_lo = 0;
	u64 offset = 0;
	u32 bar = 0;
	int rc = 0;

	rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_BAR, &bar);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n",
			  rc);
		return rc;
	}

	if (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
			  bar);
		return -EINVAL;
	}

	rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_LO, &offset_lo);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n",
			  rc);
		return rc;
	}

	/* Get optional extension to 64bit offset. */
	if (has_offset_hi) {
		rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_HI, &offset_high);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n",
				  rc);
			return rc;
		}
	}
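	/* Combine the low and (optional) high parts of the table offset
	 * advertised by the VSEC into a single 64-bit byte offset, then
	 * range-check it against the BAR before mapping.
	 */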
	offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) |
		 (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT);

	if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(u32) * 2) {
		netif_err(efx, probe, efx->net_dev,
			  "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
			  bar, offset);
		return -EINVAL;
	}

	/* Temporarily map BAR. */
	rc = efx_init_io(efx, bar,
			 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
			 pci_resource_len(efx->pci_dev, bar));
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "efx_init_io failed, rc=%d\n", rc);
		return rc;
	}

	rc = ef100_pci_walk_xilinx_table(efx, offset, result);

	/* Unmap temporarily mapped BAR. */
	efx_fini_io(efx);
	return rc;
}

/* Call ef100_pci_parse_xilinx_cap() for each Xilinx PCI_EXT_CAP_ID_VNDR
 * capability.
 */
static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
					   struct ef100_func_ctl_window *result)
{
	int num_xilinx_caps = 0;
	int cap = 0;

	result->valid = false;

	while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, PCI_EXT_CAP_ID_VNDR)) != 0) {
		int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH;
		u32 vsec_ver = 0;
		u32 vsec_len = 0;
		u32 vsec_id = 0;
		int rc = 0;

		num_xilinx_caps++;

		rc = ef100_pci_get_config_bits(efx, vndr_cap, ID, &vsec_id);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_ID, rc=%d\n",
				  rc);
			return rc;
		}

		rc = ef100_pci_get_config_bits(efx, vndr_cap, VER, &vsec_ver);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_VER, rc=%d\n",
				  rc);
			return rc;
		}

		/* Get length of whole capability - i.e. starting at cap */
		rc = ef100_pci_get_config_bits(efx, vndr_cap, LEN, &vsec_len);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n",
				  rc);
			return rc;
		}

		if (vsec_id == ESE_GZ_XILINX_VSEC_ID &&
		    vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR &&
		    vsec_len >= ESE_GZ_VSEC_LEN_MIN) {
			bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT);

			rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap,
							has_offset_hi, result);
			if (rc)
				return rc;
		}
	}

	if (num_xilinx_caps && !result->valid) {
		netif_err(efx, probe, efx->net_dev,
			  "Seen %d Xilinx tables, but no EF100 entry.\n",
			  num_xilinx_caps);
		return -EINVAL;
	}

	return 0;
}
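/* The (bar, offset) pair found above is consumed by ef100_pci_probe() below:
 * the BAR is mapped with efx_init_io() and the offset becomes efx->reg_base.
 * When no Xilinx capability is present at all, the probe falls back to
 * EFX_EF100_PCI_DEFAULT_BAR with offset 0.
 */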
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void ef100_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	struct efx_probe_data *probe_data;

	if (!efx)
		return;

	probe_data = container_of(efx, struct efx_probe_data, efx);
	ef100_remove_netdev(probe_data);
#ifdef CONFIG_SFC_SRIOV
	efx_fini_struct_tc(efx);
#endif

	ef100_remove(efx);
	efx_fini_io(efx);

	pci_dbg(pci_dev, "shutdown successful\n");

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	kfree(probe_data);
}

static int ef100_pci_probe(struct pci_dev *pci_dev,
			   const struct pci_device_id *entry)
{
	struct ef100_func_ctl_window fcw = { 0 };
	struct efx_probe_data *probe_data;
	struct efx_nic *efx;
	int rc;

	/* Allocate probe data and struct efx_nic */
	probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
	if (!probe_data)
		return -ENOMEM;
	probe_data->pci_dev = pci_dev;
	efx = &probe_data->efx;

	efx->type = (const struct efx_nic_type *)entry->driver_data;

	efx->pci_dev = pci_dev;
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, pci_dev);
	if (rc)
		goto fail;

	efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
	pci_info(pci_dev, "Solarflare EF100 NIC detected\n");

	rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
	if (rc) {
		pci_err(pci_dev,
			"Error looking for ef100 function control window, rc=%d\n",
			rc);
		goto fail;
	}

	if (!fcw.valid) {
		/* Extended capability not found - use defaults. */
		fcw.bar = EFX_EF100_PCI_DEFAULT_BAR;
		fcw.offset = 0;
		fcw.valid = true;
	}

	if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
		pci_err(pci_dev, "Func control window overruns BAR\n");
		rc = -EIO;
		goto fail;
	}

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx, fcw.bar,
			 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
			 pci_resource_len(efx->pci_dev, fcw.bar));
	if (rc)
		goto fail;

	efx->reg_base = fcw.offset;

	rc = efx->type->probe(efx);
	if (rc)
		goto fail;

	efx->state = STATE_PROBED;
	rc = ef100_probe_netdev(probe_data);
	if (rc)
		goto fail;

	pci_dbg(pci_dev, "initialisation successful\n");

	return 0;

fail:
	ef100_pci_remove(pci_dev);
	return rc;
}

#ifdef CONFIG_SFC_SRIOV
static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	struct efx_nic *efx = pci_get_drvdata(dev);
	int rc;

	if (efx->type->sriov_configure) {
		rc = efx->type->sriov_configure(efx, num_vfs);
		if (rc)
			return rc;
		else
			return num_vfs;
	}
	return -ENOENT;
}
#endif

/* PCI device ID table */
static const struct pci_device_id ef100_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100),  /* Riverhead PF */
		.driver_data = (unsigned long) &ef100_pf_nic_type },
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100),  /* Riverhead VF */
		.driver_data = (unsigned long) &ef100_vf_nic_type },
	{0}  /* end of list */
};

struct pci_driver ef100_pci_driver = {
	.name           = "sfc_ef100",
	.id_table       = ef100_pci_table,
	.probe          = ef100_pci_probe,
	.remove         = ef100_pci_remove,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = ef100_pci_sriov_configure,
#endif
	.err_handler    = &efx_err_handlers,
};

MODULE_DEVICE_TABLE(pci, ef100_pci_table);
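/* MODULE_DEVICE_TABLE() exports ef100_pci_table so that userspace tooling can
 * autoload the module when a matching device appears.  Note that no
 * module_init() is defined here: ef100_pci_driver is non-static and is
 * expected to be registered with pci_register_driver() elsewhere in the sfc
 * driver.
 */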