/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "cn23xx_pf_device.h"
#include "lio_main.h"
#include "lio_rss.h"

static int
lio_cn23xx_pf_soft_reset(struct octeon_device *oct)
{

	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	lio_dev_dbg(oct, "BIST enabled for CN23XX soft reset\n");

	/* SCRATCH1 is cleared by a successful reset; seed it with a marker. */
	lio_write_csr64(oct, LIO_CN23XX_SLI_SCRATCH1, 0x1234ULL);

	/* Initiate chip-wide soft reset */
	lio_pci_readq(oct, LIO_CN23XX_RST_SOFT_RST);
	lio_pci_writeq(oct, 1, LIO_CN23XX_RST_SOFT_RST);

	/* Wait for 100ms as Octeon resets. */
	lio_mdelay(100);

	if (lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH1)) {
		lio_dev_err(oct, "Soft reset failed\n");
		return (1);
	}

	lio_dev_dbg(oct, "Reset completed\n");

	/* Restore the reset value of the window write mask register. */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	return (0);
}

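/*
 * Enable PCIe error reporting for this PF. If the Device Control register
 * already shows an error status, dump the AER mask/status registers first.
 * The low four bits of PCIe Device Control are the Correctable, Non-Fatal,
 * Fatal and Unsupported Request reporting enables, which is what the
 * "regval |= 0xf" below turns on.
 */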
static void
lio_cn23xx_pf_enable_error_reporting(struct octeon_device *oct)
{
	uint32_t corrtable_err_status, uncorrectable_err_mask, regval;

	regval = lio_read_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL);
	if (regval & LIO_CN23XX_CFG_PCIE_DEVCTL_MASK) {
		uncorrectable_err_mask =
		    lio_read_pci_cfg(oct,
				     LIO_CN23XX_CFG_PCIE_UNCORRECT_ERR_MASK);
		corrtable_err_status =
		    lio_read_pci_cfg(oct,
				     LIO_CN23XX_CFG_PCIE_CORRECT_ERR_STATUS);
		lio_dev_err(oct, "PCI-E Fatal error detected;\n"
			    "\tdev_ctl_status_reg = 0x%08x\n"
			    "\tuncorrectable_error_mask_reg = 0x%08x\n"
			    "\tcorrectable_error_status_reg = 0x%08x\n",
			    regval, uncorrectable_err_mask,
			    corrtable_err_status);
	}

	regval |= 0xf;	/* Enable Link error reporting */

	lio_dev_dbg(oct, "Enabling PCI-E error reporting..\n");
	lio_write_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL, regval);
}

static uint32_t
lio_cn23xx_pf_coprocessor_clock(struct octeon_device *oct)
{
	/*
	 * Bits 29:24 of RST_BOOT[PNR_MUL] hold the ref. clock MULTIPLIER
	 * for SLI.
	 */

	/* TBD: get the info in Hand-shake */
	return (((lio_pci_readq(oct, LIO_CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
}

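/*
 * Convert a time interval in microseconds into output-queue interrupt
 * ticks, where one OQ tick is 1024 SLI clock cycles. As a worked example
 * (hypothetical values, they vary by board): if RST_BOOT[PNR_MUL] reads 12,
 * the SLI clock is 12 * 50 = 600 MHz, i.e. 600 cycles/us; then
 * 600 * 1000 / 1024 = 585 OQ ticks per millisecond, so a 100 us interval
 * maps to 585 * 100 / 1000 = 58 ticks.
 */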
uint32_t
lio_cn23xx_pf_get_oq_ticks(struct octeon_device *oct, uint32_t time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	uint32_t oqticks_per_us = lio_cn23xx_pf_coprocessor_clock(oct);

	oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/*
	 * time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return (oqticks_per_us);
}

static void
lio_cn23xx_pf_setup_global_mac_regs(struct octeon_device *oct)
{
	uint64_t reg_val;
	uint16_t mac_no = oct->pcie_port;
	uint16_t pf_num = oct->pf_num;

	/* programming SRN and TRS for each MAC(0..3) */
	lio_dev_dbg(oct, "%s: Using pcie port %d\n", __func__, mac_no);

	/* By default, map all 64 IOQs to a single MAC */
	reg_val =
	    lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));

	/* setting SRN <6:0> */
	reg_val = pf_num * LIO_CN23XX_PF_MAX_RINGS;

	/* setting TRS <23:16> */
	reg_val = reg_val |
	    (oct->sriov_info.trs << LIO_CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);

	/* write these settings to MAC register */
	lio_write_csr64(oct, LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
			reg_val);

	lio_dev_dbg(oct, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016lx\n", mac_no,
		    pf_num,
		    lio_read_csr64(oct,
				   LIO_CN23XX_SLI_PKT_MAC_RINFO64(mac_no,
								  pf_num)));
}

static int
lio_cn23xx_pf_reset_io_queues(struct octeon_device *oct)
{
	uint64_t d64;
	uint32_t ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t q_no, srn;
	int ret_val = 0;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/*
	 * As per the HRM register description, s/w can't write 0 to ENB;
	 * to turn a queue off, the RST bit has to be set instead.
	 */

	/* Reset the Enable bit for all the 64 IQs. */
	for (q_no = srn; q_no < ern; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 |= LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct,
				LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = srn; q_no < ern; q_no++) {
		volatile uint64_t reg_val =
		    lio_read_csr64(oct,
				   LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) &&
		       !(reg_val & LIO_CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop) {
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop--;
		}

		if (!loop) {
			lio_dev_err(oct,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				    q_no);
			return (-1);
		}

		reg_val &= ~LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(oct,
					 LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(oct, "clearing the reset failed for qno: %u\n",
				    q_no);
			ret_val = -1;
		}
	}

	return (ret_val);
}

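/*
 * Global input-queue setup for this PF: reset all rings, stamp each ring's
 * IQ_PKT_CONTROL register with the MAC and PF numbers (only the PF may set
 * those fields), and program the instruction-count watermark that triggers
 * PI_INT. The watermark comes from the driver configuration via
 * LIO_GET_IQ_INTR_PKT_CFG(), masked to the field width.
 */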
static int
lio_cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	struct lio_instr_queue *iq;
	uint64_t intr_threshold;
	uint64_t pf_num, reg_val;
	uint32_t q_no, ern, srn;

	pf_num = oct->pf_num;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (lio_cn23xx_pf_reset_io_queues(oct))
		return (-1);

	/*
	 * Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg for all queues.
	 * Only the PF can set these bits.
	 * bits 29:30 indicate the MAC num.
	 * bits 32:47 indicate the PVF num.
	 */
	for (q_no = 0; q_no < ern; q_no++) {
		reg_val = oct->pcie_port <<
		    LIO_CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;

		reg_val |= pf_num << LIO_CN23XX_PKT_INPUT_CTL_PF_NUM_POS;

		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);
	}

	/*
	 * Select ES, RO, NS, RDSIZE and DPTR Format #0 for
	 * the PF queues.
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		uint32_t inst_cnt_reg;

		iq = oct->instr_queue[q_no];
		if (iq != NULL)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(q_no);

		reg_val =
		    lio_read_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

		reg_val |= LIO_CN23XX_PKT_INPUT_CTL_MASK;

		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		/* Set WMARK level for triggering PI_INT */
		intr_threshold = LIO_GET_IQ_INTR_PKT_CFG(cn23xx->conf) &
		    LIO_CN23XX_PKT_IN_DONE_WMARK_MASK;

		lio_write_csr64(oct, inst_cnt_reg,
				(lio_read_csr64(oct, inst_cnt_reg) &
				 ~(LIO_CN23XX_PKT_IN_DONE_WMARK_MASK <<
				   LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
				(intr_threshold <<
				 LIO_CN23XX_PKT_IN_DONE_WMARK_BIT_POS));
	}
	return (0);
}

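/*
 * Global output-queue setup: pick the OQ watermark (backpressure on/off),
 * then for every PF ring select the DPTR format, clear buffer mode, set the
 * endian-swap (ES) bits as the host byte order requires, and program the
 * packet-count/time interrupt thresholds. ROR/NSR (relaxed ordering and
 * no-snoop) are cleared for both the scatter list and the data.
 */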
static void
lio_cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
{
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t time_threshold;
	uint32_t ern, q_no, reg_val, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (LIO_GET_IS_SLI_BP_ON_CFG(cn23xx->conf)) {
		lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_WMARK, 32);
	} else {
		/* Set Output queue watermark to 0 to disable backpressure */
		lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_WMARK, 0);
	}

	for (q_no = srn; q_no < ern; q_no++) {
		reg_val = lio_read_csr32(oct,
					 LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no));

		/* set IPTR & DPTR */
		reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_DPTR;

		/* reset BMODE */
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_BMODE);

		/*
		 * No Relaxed Ordering, No Snoop, 64-bit byte swap for the
		 * Output Queue Scatter List: reset ROR_P, NSR_P
		 */
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_NSR_P);

#if BYTE_ORDER == LITTLE_ENDIAN
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_ES_P);
#else	/* BYTE_ORDER != LITTLE_ENDIAN */
		reg_val |= (LIO_CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif	/* BYTE_ORDER == LITTLE_ENDIAN */

		/*
		 * No Relaxed Ordering, No Snoop, 64-bit byte swap for the
		 * Output Queue Data: reset ROR, NSR
		 */
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(LIO_CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (LIO_CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no),
				reg_val);

		/*
		 * Set up interrupt packet and time thresholds for all the
		 * OQs. These interrupts are enabled in the
		 * oct->fn_list.enable_interrupt() routine, which is called
		 * after IOQ init.
		 */
		time_threshold = lio_cn23xx_pf_get_oq_ticks(
		    oct, (uint32_t)LIO_GET_OQ_INTR_TIME_CFG(cn23xx->conf));

		lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
				(LIO_GET_OQ_INTR_PKT_CFG(cn23xx->conf) |
				 (time_threshold << 32)));
	}

	/* Set the watermark level for PKO backpressure */
	lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_WMARK, 0x40);

	/* Enable channel-level backpressure */
	if (oct->pf_num)
		lio_write_csr64(oct, LIO_CN23XX_SLI_OUT_BP_EN2_W1S,
				0xffffffffffffffffULL);
	else
		lio_write_csr64(oct, LIO_CN23XX_SLI_OUT_BP_EN_W1S,
				0xffffffffffffffffULL);
}

static int
lio_cn23xx_pf_setup_device_regs(struct octeon_device *oct)
{

	lio_cn23xx_pf_enable_error_reporting(oct);

	/* program the MAC(0..3)_RINFO before setting up input/output regs */
	lio_cn23xx_pf_setup_global_mac_regs(oct);

	if (lio_cn23xx_pf_setup_global_input_regs(oct))
		return (-1);

	lio_cn23xx_pf_setup_global_output_regs(oct);

	/*
	 * The default error timeout value should be 0x200000 to avoid a host
	 * hang when an invalid register is read.
	 */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WINDOW_CTL,
			LIO_CN23XX_SLI_WINDOW_CTL_DEFAULT);

	/* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
	lio_write_csr64(oct, LIO_CN23XX_SLI_PKT_IN_JABBER,
			LIO_CN23XX_MAX_INPUT_JABBER);
	return (0);
}

static void
lio_cn23xx_pf_setup_iq_regs(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue *iq = oct->instr_queue[iq_no];
	uint64_t pkt_in_done;

	iq_no += oct->sriov_info.pf_srn;

	/* Write the start of the input queue's ring and its size */
	lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			iq->base_addr_dma);
	lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/*
	 * Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = LIO_CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = LIO_CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	lio_dev_dbg(oct, "InstQ[%d]:dbell reg @ 0x%x instcnt_reg @ 0x%x\n",
		    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/*
	 * Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = lio_read_csr64(oct, iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		lio_write_csr64(oct, iq->inst_cnt_reg,
				(pkt_in_done | LIO_CN23XX_INTR_CINT_ENB));
	} else {
		/*
		 * Clear the count by writing back what we read, but don't
		 * enable interrupts
		 */
		lio_write_csr64(oct, iq->inst_cnt_reg, pkt_in_done);
	}

	iq->reset_instr_cnt = 0;
}

static void
lio_cn23xx_pf_setup_oq_regs(struct octeon_device *oct, uint32_t oq_no)
{
	struct lio_droq *droq = oct->droq[oq_no];
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t cnt_threshold;
	uint64_t time_threshold;
	uint32_t reg_val;

	oq_no += oct->sriov_info.pf_srn;

	lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
			droq->desc_ring_dma);
	lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);

	lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			droq->buffer_size);

	/* pkts_sent and pkts_credit regs */
	droq->pkts_sent_reg = LIO_CN23XX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg = LIO_CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);

	if (!oct->msix_on) {
		/*
		 * Enable this output queue to generate Packet Timer
		 * Interrupt
		 */
		reg_val =
		    lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
		reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_TENB;
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
				reg_val);

		/*
		 * Enable this output queue to generate Packet Count
		 * Interrupt
		 */
		reg_val =
		    lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
		reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_CENB;
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
				reg_val);
	} else {
		time_threshold = lio_cn23xx_pf_get_oq_ticks(oct,
		    (uint32_t)LIO_GET_OQ_INTR_TIME_CFG(cn23xx->conf));
		cnt_threshold = (uint32_t)LIO_GET_OQ_INTR_PKT_CFG(cn23xx->conf);

		lio_write_csr64(oct, LIO_CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
				((time_threshold << 32 | cnt_threshold)));
	}
}

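/*
 * Bring the PF's IO queues online. For each input ring this sets the 64B
 * instruction-size bit when requested, clears the reset bit (waiting for
 * RST to drop or QUIET to assert, as the reset handshake requires), and
 * finally sets RING_ENB. Output rings only need their RING_ENB bit set.
 */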
static int
lio_cn23xx_pf_enable_io_queues(struct octeon_device *oct)
{
	uint64_t reg_val;
	uint32_t ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t q_no, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	for (q_no = srn; q_no < ern; q_no++) {
		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= LIO_CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
			/*
			 * IOQs are in reset by default in PEM2 mode,
			 * so clear the reset bit first.
			 */
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

			if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
				while ((reg_val &
					LIO_CN23XX_PKT_INPUT_CTL_RST) &&
				       !(reg_val &
					 LIO_CN23XX_PKT_INPUT_CTL_QUIET) &&
				       loop) {
					reg_val = lio_read_csr64(oct,
					    LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
					loop--;
				}
				if (!loop) {
					lio_dev_err(oct, "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
						    q_no);
					return (-1);
				}
				reg_val &= ~LIO_CN23XX_PKT_INPUT_CTL_RST;
				lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);

				reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
					lio_dev_err(oct, "clearing the reset failed for qno: %u\n",
						    q_no);
					return (-1);
				}
			}
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}
	for (q_no = srn; q_no < ern; q_no++) {
		uint32_t reg_val;

		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val |= LIO_CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no),
					reg_val);
		}
	}
	return (0);
}

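/*
 * Take the PF's IO queues offline: clear RING_ENB and set RST on every
 * input ring, wait for the shared SLI_PKT_IOQ_RING_RST status bit (it is
 * common to the IQ/OQ pair), and flush the doorbell and packet-count
 * registers so the rings come back in a clean state.
 */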
static void
lio_cn23xx_pf_disable_io_queues(struct octeon_device *oct)
{
	volatile uint64_t d64;
	volatile uint32_t d32;
	int loop;
	unsigned int q_no;
	uint32_t ern, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	/* Disable Input Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/* start the Reset for a particular ring */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 &= ~LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
		d64 |= LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);

		/*
		 * Wait until hardware indicates that the particular IQ
		 * is out of reset.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Input Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_IQ_DOORBELL(q_no),
				0xFFFFFFFF);
		while (((lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_DOORBELL(q_no))) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}
	}

	/* Disable Output Queues. */
	for (q_no = srn; q_no < ern; q_no++) {
		loop = lio_ms_to_ticks(1000);

		/*
		 * Wait until hardware indicates that the particular OQ
		 * is out of reset. SLI_PKT_RING_RST is common to both
		 * IQs and OQs.
		 */
		d64 = lio_read_csr64(oct, LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
		while (!(d64 & BIT_ULL(q_no)) && loop--) {
			d64 = lio_read_csr64(oct,
					     LIO_CN23XX_SLI_PKT_IOQ_RING_RST);
			lio_sleep_timeout(1);
			loop--;
		}

		/* Reset the doorbell register for this Output Queue. */
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
				0xFFFFFFFF);
		while ((lio_read_csr64(oct,
				       LIO_CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) !=
			0ULL) && loop--) {
			lio_sleep_timeout(1);
		}

		/* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
		d32 = lio_read_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no));
		lio_write_csr32(oct, LIO_CN23XX_SLI_OQ_PKTS_SENT(q_no), d32);
	}
}

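/*
 * Per-ring MSI-X handler. The PKTS_SENT register carries the PO_INT/PI_INT
 * status bits alongside the packet count, so a single 64-bit read tells us
 * which direction needs service; an all-ones value indicates a failed PCI
 * read, e.g. an interrupt raised on an error condition.
 */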
static uint64_t
lio_cn23xx_pf_msix_interrupt_handler(void *dev)
{
	struct lio_ioq_vector *ioq_vector = (struct lio_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct lio_droq *droq = oct->droq[ioq_vector->droq_index];
	uint64_t pkts_sent;
	uint64_t ret = 0;

	if (droq == NULL) {
		lio_dev_err(oct, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
			    oct->pf_num, ioq_vector->ioq_num);
		return (0);
	}

	pkts_sent = lio_read_csr64(oct, droq->pkts_sent_reg);

	/*
	 * If our device has interrupted, then proceed. Also check
	 * for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return (ret);

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if (pkts_sent & LIO_CN23XX_INTR_PO_INT)
		ret |= LIO_MSIX_PO_INT;

	if (pkts_sent & LIO_CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= LIO_MSIX_PI_INT;

	/*
	 * We never need to handle the MSI-X mailbox interrupt for the PF;
	 * it arrives on the last MSI-X vector.
	 */
	return (ret);
}

static void
lio_cn23xx_pf_interrupt_handler(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t intr64;

	lio_dev_dbg(oct, "In %s octeon_dev @ %p\n", __func__, oct);
	intr64 = lio_read_csr64(oct, cn23xx->intr_sum_reg64);

	oct->int_status = 0;

	if (intr64 & LIO_CN23XX_INTR_ERR)
		lio_dev_err(oct, "Error Intr: 0x%016llx\n",
			    LIO_CAST64(intr64));

	if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
		if (intr64 & LIO_CN23XX_INTR_PKT_DATA)
			oct->int_status |= LIO_DEV_INTR_PKT_DATA;
	}

	if (intr64 & (LIO_CN23XX_INTR_DMA0_FORCE))
		oct->int_status |= LIO_DEV_INTR_DMA0_FORCE;

	if (intr64 & (LIO_CN23XX_INTR_DMA1_FORCE))
		oct->int_status |= LIO_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	lio_write_csr64(oct, cn23xx->intr_sum_reg64, intr64);
}

static void
lio_cn23xx_pf_bar1_idx_setup(struct octeon_device *oct, uint64_t core_addr,
			     uint32_t idx, int valid)
{
	volatile uint64_t bar1;
	uint64_t reg_adr;

	if (!valid) {
		/* Clear the enable bit (bit 0) of the BAR1 index register. */
		reg_adr = lio_pci_readq(oct,
				LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port,
							      idx));
		bar1 = reg_adr;
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port,
							     idx));
		reg_adr = lio_pci_readq(oct,
				LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port,
							      idx));
		bar1 = reg_adr;
		return;
	}

	/*
	 * The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
	 * bits <41:22> of the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | LIO_PCI_BAR1_MASK),
		       LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));

	/* Read the register back (the value is unused). */
	bar1 = lio_pci_readq(oct, LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port,
								idx));
}

static void
lio_cn23xx_pf_bar1_idx_write(struct octeon_device *oct, uint32_t idx,
			     uint32_t mask)
{

	lio_pci_writeq(oct, mask,
		       LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}

static uint32_t
lio_cn23xx_pf_bar1_idx_read(struct octeon_device *oct, uint32_t idx)
{

	return ((uint32_t)lio_pci_readq(oct,
			LIO_CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port,
						      idx)));
}

/* always call with lock held */
static uint32_t
lio_cn23xx_pf_update_read_index(struct lio_instr_queue *iq)
{
	struct octeon_device *oct = iq->oct_dev;
	uint32_t new_idx;
	uint32_t last_done;
	uint32_t pkt_in_done = lio_read_csr32(oct, iq->inst_cnt_reg);

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	/*
	 * Modulo of the new index with the IQ size will give us
	 * the new index. The iq->reset_instr_cnt is always zero for
	 * cn23xx, so no extra adjustments are needed.
	 */
	new_idx = (iq->octeon_read_index +
		   ((uint32_t)(last_done & LIO_CN23XX_PKT_IN_DONE_CNT_MASK))) %
	    iq->max_count;

	return (new_idx);
}

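/*
 * Interrupt enable/disable helpers. OCTEON_ALL_INTR writes the full mask in
 * one shot; OCTEON_OUTPUT_INTR touches only the PKT_DATA bit, so output-queue
 * interrupts can be toggled without disturbing the rest of the mask.
 */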
static void
lio_cn23xx_pf_enable_interrupt(struct octeon_device *oct, uint8_t intr_flag)
{
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t intr_val = 0;

	/* Divide the single write into multiple writes based on the flag. */
	/* Enable Interrupt */
	if (intr_flag == OCTEON_ALL_INTR) {
		lio_write_csr64(oct, cn23xx->intr_enb_reg64,
				cn23xx->intr_mask64);
	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
		intr_val = lio_read_csr64(oct, cn23xx->intr_enb_reg64);
		intr_val |= LIO_CN23XX_INTR_PKT_DATA;
		lio_write_csr64(oct, cn23xx->intr_enb_reg64, intr_val);
	}
}

static void
lio_cn23xx_pf_disable_interrupt(struct octeon_device *oct, uint8_t intr_flag)
{
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t intr_val = 0;

	/* Disable Interrupts */
	if (intr_flag == OCTEON_ALL_INTR) {
		lio_write_csr64(oct, cn23xx->intr_enb_reg64, 0);
	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
		intr_val = lio_read_csr64(oct, cn23xx->intr_enb_reg64);
		intr_val &= ~LIO_CN23XX_INTR_PKT_DATA;
		lio_write_csr64(oct, cn23xx->intr_enb_reg64, intr_val);
	}
}

static void
lio_cn23xx_pf_get_pcie_qlmport(struct octeon_device *oct)
{
	oct->pcie_port = (lio_read_csr32(oct,
					 LIO_CN23XX_SLI_MAC_NUMBER)) & 0xff;

	lio_dev_dbg(oct, "CN23xx uses PCIE Port %d\n",
		    oct->pcie_port);
}

static void
lio_cn23xx_pf_get_pf_num(struct octeon_device *oct)
{
	uint32_t fdl_bit;

	/* Read the Function Dependency Link reg to get the function number */
	fdl_bit = lio_read_pci_cfg(oct, LIO_CN23XX_PCIE_SRIOV_FDL);
	oct->pf_num = ((fdl_bit >> LIO_CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
		       LIO_CN23XX_PCIE_SRIOV_FDL_MASK);
}

static void
lio_cn23xx_pf_setup_reg_address(struct octeon_device *oct)
{
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;

	oct->reg_list.pci_win_wr_addr = LIO_CN23XX_SLI_WIN_WR_ADDR64;

	oct->reg_list.pci_win_rd_addr_hi = LIO_CN23XX_SLI_WIN_RD_ADDR_HI;
	oct->reg_list.pci_win_rd_addr_lo = LIO_CN23XX_SLI_WIN_RD_ADDR64;
	oct->reg_list.pci_win_rd_addr = LIO_CN23XX_SLI_WIN_RD_ADDR64;

	oct->reg_list.pci_win_wr_data_hi = LIO_CN23XX_SLI_WIN_WR_DATA_HI;
	oct->reg_list.pci_win_wr_data_lo = LIO_CN23XX_SLI_WIN_WR_DATA_LO;
	oct->reg_list.pci_win_wr_data = LIO_CN23XX_SLI_WIN_WR_DATA64;

	oct->reg_list.pci_win_rd_data = LIO_CN23XX_SLI_WIN_RD_DATA64;

	lio_cn23xx_pf_get_pcie_qlmport(oct);

	cn23xx->intr_mask64 = LIO_CN23XX_INTR_MASK;
	if (!oct->msix_on)
		cn23xx->intr_mask64 |= LIO_CN23XX_INTR_PKT_TIME;

	cn23xx->intr_sum_reg64 =
	    LIO_CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	cn23xx->intr_enb_reg64 =
	    LIO_CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
}

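/*
 * Decide how many rings the PF owns. An explicit num_pf_rings request is
 * honored up to the hardware maximum; otherwise the count defaults to the
 * number of RSS buckets (when RSS is compiled in) or CPUs, whichever is
 * smaller. For example, on a hypothetical 8-CPU host with no explicit
 * request this yields trs = 8, pf_srn = 0 and num_pf_rings = 8.
 */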
static int
lio_cn23xx_pf_sriov_config(struct octeon_device *oct)
{
	struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint32_t num_pf_rings, total_rings, max_rings;

	cn23xx->conf = (struct lio_config *)lio_get_config_info(oct, LIO_23XX);

	max_rings = LIO_CN23XX_PF_MAX_RINGS;

	if (oct->sriov_info.num_pf_rings) {
		num_pf_rings = oct->sriov_info.num_pf_rings;
		if (num_pf_rings > max_rings) {
			num_pf_rings = min(mp_ncpus, max_rings);
			lio_dev_warn(oct, "num_queues_per_pf requested %u is more than available rings (%u). Reducing to %u\n",
				     oct->sriov_info.num_pf_rings,
				     max_rings, num_pf_rings);
		}
	} else {
#ifdef RSS
		num_pf_rings = min(rss_getnumbuckets(), mp_ncpus);
#else
		num_pf_rings = min(mp_ncpus, max_rings);
#endif
	}

	total_rings = num_pf_rings;
	oct->sriov_info.trs = total_rings;
	oct->sriov_info.pf_srn = total_rings - num_pf_rings;
	oct->sriov_info.num_pf_rings = num_pf_rings;

	lio_dev_dbg(oct, "trs:%d pf_srn:%d num_pf_rings:%d\n",
		    oct->sriov_info.trs, oct->sriov_info.pf_srn,
		    oct->sriov_info.num_pf_rings);

	return (0);
}

int
lio_cn23xx_pf_setup_device(struct octeon_device *oct)
{
	uint64_t BAR0, BAR1;
	uint32_t data32;

	/* Read the 64-bit BAR0 and BAR1 addresses from PCI config space. */
	data32 = lio_read_pci_cfg(oct, 0x10);
	BAR0 = (uint64_t)(data32 & ~0xf);
	data32 = lio_read_pci_cfg(oct, 0x14);
	BAR0 |= ((uint64_t)data32 << 32);
	data32 = lio_read_pci_cfg(oct, 0x18);
	BAR1 = (uint64_t)(data32 & ~0xf);
	data32 = lio_read_pci_cfg(oct, 0x1c);
	BAR1 |= ((uint64_t)data32 << 32);

	if (!BAR0 || !BAR1) {
		if (!BAR0)
			lio_dev_err(oct, "Device BAR0 unassigned\n");

		if (!BAR1)
			lio_dev_err(oct, "Device BAR1 unassigned\n");

		return (1);
	}

	if (lio_map_pci_barx(oct, 0))
		return (1);

	if (lio_map_pci_barx(oct, 1)) {
		lio_dev_err(oct, "%s CN23XX BAR1 map failed\n", __func__);
		lio_unmap_pci_barx(oct, 0);
		return (1);
	}

	lio_cn23xx_pf_get_pf_num(oct);

	if (lio_cn23xx_pf_sriov_config(oct)) {
		lio_unmap_pci_barx(oct, 0);
		lio_unmap_pci_barx(oct, 1);
		return (1);
	}

	lio_write_csr64(oct, LIO_CN23XX_SLI_MAC_CREDIT_CNT,
			0x3F802080802080ULL);

	oct->fn_list.setup_iq_regs = lio_cn23xx_pf_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn23xx_pf_setup_oq_regs;
	oct->fn_list.process_interrupt_regs = lio_cn23xx_pf_interrupt_handler;
	oct->fn_list.msix_interrupt_handler =
	    lio_cn23xx_pf_msix_interrupt_handler;

	oct->fn_list.soft_reset = lio_cn23xx_pf_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn23xx_pf_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn23xx_pf_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn23xx_pf_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn23xx_pf_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn23xx_pf_bar1_idx_read;

	oct->fn_list.enable_interrupt = lio_cn23xx_pf_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn23xx_pf_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn23xx_pf_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn23xx_pf_disable_io_queues;

	lio_cn23xx_pf_setup_reg_address(oct);

	oct->coproc_clock_rate = 1000000ULL *
	    lio_cn23xx_pf_coprocessor_clock(oct);

	return (0);
}

int
lio_cn23xx_pf_fw_loaded(struct octeon_device *oct)
{
	uint64_t val;

	/* The firmware sets SCR2_BIT_FW_LOADED in SCRATCH2 once it is up. */
	val = lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
	return ((val >> SCR2_BIT_FW_LOADED) & 1ULL);
}