// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2009 - 2019 Broadcom */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../pci.h"

/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
#define BRCM_PCIE_CAP_REGS	0x00ac

/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1	0x0188
#define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
#define  PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN	0x0

#define PCIE_RC_CFG_PRIV1_ID_VAL3	0x043c
#define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff

#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY	0x04dc
#define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK	0x1f0
#define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK	0xc00

#define PCIE_RC_CFG_PRIV1_ROOT_CAP	0x4f8
#define  PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK	0xf8

#define PCIE_RC_DL_MDIO_ADDR	0x1100
#define PCIE_RC_DL_MDIO_WR_DATA	0x1104
#define PCIE_RC_DL_MDIO_RD_DATA	0x1108

#define PCIE_RC_PL_REG_PHY_CTL_1	0x1804
#define  PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK	0x8

#define PCIE_RC_PL_PHY_CTL_15	0x184c
#define  PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK	0x400000
#define  PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK	0xff

#define PCIE_MISC_MISC_CTRL	0x4008
#define  PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK	0x80
#define  PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK	0x400
#define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK	0x1000
#define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000

#define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK	0xf8000000
#define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK	0x07c00000
#define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK	0x0000001f
#define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO	0x400c
#define PCIE_MEM_WIN0_LO(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI	0x4010
#define PCIE_MEM_WIN0_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)

/*
 * NOTE: You may see the term "BAR" in a number of register names used by
 * this driver. The term is an artifact of when the HW core was an
 * endpoint device (EP). Now it is a root complex (RC) and anywhere a
 * register has the term "BAR" it is related to an inbound window.
 */

#define PCIE_BRCM_MAX_INBOUND_WINS	16
#define PCIE_MISC_RC_BAR1_CONFIG_LO	0x402c
#define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK	0x1f

#define PCIE_MISC_RC_BAR4_CONFIG_LO	0x40d4

#define PCIE_MISC_MSI_BAR_CONFIG_LO	0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI	0x4048

#define PCIE_MISC_MSI_DATA_CONFIG	0x404c
#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32	0xffe06540
#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8	0xfff86540
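
/*
 * A plausible reading of the two DATA_CONFIG values above (an inference
 * from the numbers, not stated in this file): the low half is the MSI data
 * pattern (0x6540) and the high half selects which data bits the HW
 * compares. 0xffe0 leaves the five LSBs free, matching the 32 MSI vectors;
 * 0xfff8 leaves three, matching the 8 legacy vectors. Consistent with
 * this, brcm_msi_compose_msi_msg() generates data of the form
 * 0x6540 | hwirq.
 */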

#define PCIE_MISC_PCIE_CTRL	0x4064
#define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
#define  PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK	0x4

#define PCIE_MISC_PCIE_STATUS	0x4068
#define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK	0x80
#define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
#define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
#define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40

#define PCIE_MISC_REVISION	0x406c
#define  BRCM_PCIE_HW_REV_33	0x0303
#define  BRCM_PCIE_HW_REV_3_20	0x0320

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT	0x4070
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
#define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI	0x4080
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
#define PCIE_MEM_WIN0_BASE_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI	0x4084
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
#define PCIE_MEM_WIN0_LIMIT_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)

#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK	0x200000
#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK	0x08000000
#define  PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK	0x00800000
#define  PCIE_CLKREQ_MASK \
	  (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
	   PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)

#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP	0x40ac
#define  PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK	BIT(0)
#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP	0x410c

#define PCIE_MSI_INTR2_BASE	0x4500

/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
#define  MSI_INT_STATUS		0x0
#define  MSI_INT_CLR		0x8
#define  MSI_INT_MASK_SET	0x10
#define  MSI_INT_MASK_CLR	0x14

#define PCIE_RGR1_SW_INIT_1_PERST_MASK	0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT	0x0

#define RGR1_SW_INIT_1_INIT_GENERIC_MASK	0x2
#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT	0x1
#define RGR1_SW_INIT_1_INIT_7278_MASK	0x1
#define RGR1_SW_INIT_1_INIT_7278_SHIFT	0x0

/* PCIe parameters */
#define BRCM_NUM_PCIE_OUT_WINS		0x4
#define BRCM_INT_PCI_MSI_NR		32
#define BRCM_INT_PCI_MSI_LEGACY_NR	8
#define BRCM_INT_PCI_MSI_SHIFT		0
#define BRCM_INT_PCI_MSI_MASK		GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
#define BRCM_INT_PCI_MSI_LEGACY_MASK	GENMASK(31, \
						32 - BRCM_INT_PCI_MSI_LEGACY_NR)

/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
#define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL

/* MDIO registers */
#define MDIO_PORT0		0x0
#define MDIO_DATA_MASK		0x7fffffff
#define MDIO_PORT_MASK		0xf0000
#define MDIO_PORT_EXT_MASK	0x200000
#define MDIO_REGAD_MASK		0xffff
#define MDIO_CMD_MASK		0x00100000
#define MDIO_CMD_READ		0x1
#define MDIO_CMD_WRITE		0x0
#define MDIO_DATA_DONE_MASK	0x80000000
#define MDIO_RD_DONE(x)		(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
#define MDIO_WT_DONE(x)		(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
#define SSC_REGS_ADDR		0x1100
#define SET_ADDR_OFFSET		0x1f
#define SSC_CNTL_OFFSET		0x2
#define  SSC_CNTL_OVRD_EN_MASK	0x8000
#define  SSC_CNTL_OVRD_VAL_MASK	0x4000
#define SSC_STATUS_OFFSET	0x1
#define  SSC_STATUS_SSC_MASK	0x400
#define  SSC_STATUS_PLL_LOCK_MASK	0x800
#define PCIE_BRCM_MAX_MEMC	3

#define IDX_ADDR(pcie)		((pcie)->cfg->offsets[EXT_CFG_INDEX])
#define DATA_ADDR(pcie)		((pcie)->cfg->offsets[EXT_CFG_DATA])
#define PCIE_RGR1_SW_INIT_1(pcie)	((pcie)->cfg->offsets[RGR1_SW_INIT_1])
#define HARD_DEBUG(pcie)	((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
#define INTR2_CPU_BASE(pcie)	((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])

/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL	0xc700
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS	0x3
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK	0x4
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT	0x2
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK	0x2
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT	0x1
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK	0x1
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT	0x0

/* Forward declarations */
struct brcm_pcie;

enum {
	RGR1_SW_INIT_1,
	EXT_CFG_INDEX,
	EXT_CFG_DATA,
	PCIE_HARD_DEBUG,
	PCIE_INTR2_CPU_BASE,
};

enum pcie_soc_base {
	GENERIC,
	BCM2711,
	BCM4908,
	BCM7278,
	BCM7425,
	BCM7435,
	BCM7712,
};

struct inbound_win {
	u64 size;
	u64 pci_offset;
	u64 cpu_addr;
};

/*
 * The RESCAL block is tied to PCIe controller #1, regardless of the number
 * of controllers, and turning off PCIe controller #1 prevents access to
 * the RESCAL register blocks. Therefore, no other controller can access
 * this register space, and depending upon the bus fabric we may get a
 * timeout (UBUS/GISB) or a hang (AXI).
 */
#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN	BIT(0)

struct pcie_cfg_data {
	const int *offsets;
	const enum pcie_soc_base soc_base;
	const bool has_phy;
	const u32 quirks;
	u8 num_inbound_wins;
	int (*perst_set)(struct brcm_pcie *pcie, u32 val);
	int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
	int (*post_setup)(struct brcm_pcie *pcie);
};

struct subdev_regulators {
	unsigned int num_supplies;
	struct regulator_bulk_data supplies[];
};

struct brcm_msi {
	struct device *dev;
	void __iomem *base;
	struct device_node *np;
	struct irq_domain *inner_domain;
	struct mutex lock; /* guards the alloc/free operations */
	u64 target_addr;
	int irq;
	DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
	bool legacy;
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int legacy_shift;
	int nr; /* number of MSIs available; depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem *intr_base;
};

/* Internal PCIe Host Controller Information. */
struct brcm_pcie {
	struct device *dev;
	void __iomem *base;
	struct clk *clk;
	struct device_node *np;
	bool ssc;
	int gen;
	u64 msi_target_addr;
	struct brcm_msi *msi;
	struct reset_control *rescal;
	struct reset_control *perst_reset;
	struct reset_control *bridge_reset;
	struct reset_control *swinit_reset;
	int num_memc;
	u64 memc_size[PCIE_BRCM_MAX_MEMC];
	u32 hw_rev;
	struct subdev_regulators *sr;
	bool ep_wakeup_capable;
	const struct pcie_cfg_data *cfg;
};

static inline bool is_bmips(const struct brcm_pcie *pcie)
{
	return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
}

/*
 * This is to convert the size of the inbound "BAR" region to the
 * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
 */
static int brcm_pcie_encode_ibar_size(u64 size)
{
	int log2_in = ilog2(size);

	if (log2_in >= 12 && log2_in <= 15)
		/* Covers 4KB to 32KB (inclusive) */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 36)
		/* Covers 64KB to 64GB (inclusive) */
		return log2_in - 15;

	/* Something is awry so disable */
	return 0;
}
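
/*
 * Worked examples of the encoding above (sizes chosen for illustration):
 *
 *   4KB  -> 0x1c    32KB -> 0x1f
 *   64KB -> 0x01    1GB  -> 0x0f (ilog2 = 30, 30 - 15)
 *   64GB -> 0x15    out-of-range -> 0 (window disabled)
 */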

static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
	u32 pkt = 0;

	pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);

	return pkt;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
{
	u32 data;
	int err;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
	       base + PCIE_RC_DL_MDIO_ADDR);
	readl(base + PCIE_RC_DL_MDIO_ADDR);
	err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_RD_DATA, data,
					MDIO_RD_DONE(data), 10, 100);
	*val = FIELD_GET(MDIO_DATA_MASK, data);

	return err;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
				u8 regad, u16 wrdata)
{
	u32 data;
	int err;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
	       base + PCIE_RC_DL_MDIO_ADDR);
	readl(base + PCIE_RC_DL_MDIO_ADDR);
	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);

	err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
					MDIO_WT_DONE(data), 10, 100);
	return err;
}

/*
 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 * return value indicates error.
 */
static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
{
	int pll, ssc;
	int ret;
	u32 tmp;

	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
				   SSC_REGS_ADDR);
	if (ret < 0)
		return ret;

	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_CNTL_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
				   SSC_CNTL_OFFSET, tmp);
	if (ret < 0)
		return ret;

	usleep_range(1000, 2000);
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_STATUS_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);

	return ssc && pll ? 0 : -EIO;
}

/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
	u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
	writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}

static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       u8 win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	if (is_bmips(pcie))
		return;

	/* Write the cpu & limit addr upper bits */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
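
/*
 * A worked example of the window math above (addresses assumed, purely for
 * illustration): cpu_addr = 0x3_0000_0000 and size = 32MB give
 * cpu_addr_mb = 0x3000 and limit_addr_mb = 0x301f. The BASE_LIMIT register
 * receives the low 12 bits of each (base 0x000, limit 0x01f), and the
 * remaining high bits (0x3 for both, shifted down by HWEIGHT32(BASE_MASK)
 * = 12) land in the BASE_HI/LIMIT_HI registers.
 */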

#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS |	\
				 MSI_FLAG_USE_DEF_CHIP_OPS |	\
				 MSI_FLAG_NO_AFFINITY)

#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK |	\
				  MSI_FLAG_MULTI_PCI_MSI)

static const struct msi_parent_ops brcm_msi_parent_ops = {
	.required_flags		= BRCM_MSI_FLAGS_REQUIRED,
	.supported_flags	= BRCM_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.prefix			= "BRCM-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	struct brcm_msi *msi;
	struct device *dev;
	u32 bit;

	chained_irq_enter(chip, desc);
	msi = irq_desc_get_handler_data(desc);
	dev = msi->dev;

	status = readl(msi->intr_base + MSI_INT_STATUS);
	status >>= msi->legacy_shift;

	for_each_set_bit(bit, &status, msi->nr) {
		int ret;

		ret = generic_handle_domain_irq(msi->inner_domain, bit);
		if (ret)
			dev_dbg(dev, "unexpected MSI\n");
	}

	chained_irq_exit(chip, desc);
}

static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->target_addr);
	msg->address_hi = upper_32_bits(msi->target_addr);
	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}

static void brcm_msi_ack_irq(struct irq_data *data)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
	const int shift_amt = data->hwirq + msi->legacy_shift;

	writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
}

static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name			= "BRCM STB MSI",
	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
	.irq_ack		= brcm_msi_ack_irq,
};

static int brcm_msi_alloc(struct brcm_msi *msi, unsigned int nr_irqs)
{
	int hwirq;

	mutex_lock(&msi->lock);
	hwirq = bitmap_find_free_region(msi->used, msi->nr,
					order_base_2(nr_irqs));
	mutex_unlock(&msi->lock);

	return hwirq;
}

static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq,
			  unsigned int nr_irqs)
{
	mutex_lock(&msi->lock);
	bitmap_release_region(msi->used, hwirq, order_base_2(nr_irqs));
	mutex_unlock(&msi->lock);
}

static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct brcm_msi *msi = domain->host_data;
	int hwirq, i;

	hwirq = brcm_msi_alloc(msi, nr_irqs);

	if (hwirq < 0)
		return hwirq;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
				    &brcm_msi_bottom_irq_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);
	return 0;
}

static void brcm_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);

	brcm_msi_free(msi, d->hwirq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= brcm_irq_domain_alloc,
	.free	= brcm_irq_domain_free,
};

static int brcm_allocate_domains(struct brcm_msi *msi)
{
	struct device *dev = msi->dev;

	struct irq_domain_info info = {
		.fwnode		= of_fwnode_handle(msi->np),
		.ops		= &msi_domain_ops,
		.host_data	= msi,
		.size		= msi->nr,
	};

	msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops);
	if (!msi->inner_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		return -ENOMEM;
	}

	return 0;
}

static void brcm_free_domains(struct brcm_msi *msi)
{
	irq_domain_remove(msi->inner_domain);
}

static void brcm_msi_remove(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi = pcie->msi;

	if (!msi)
		return;
	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
	brcm_free_domains(msi);
}

static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
				BRCM_INT_PCI_MSI_MASK;

	writel(val, msi->intr_base + MSI_INT_MASK_CLR);
	writel(val, msi->intr_base + MSI_INT_CLR);

	/*
	 * Bit 0 of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed as an MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}

static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi;
	int irq, ret;
	struct device *dev = pcie->dev;

	irq = irq_of_parse_and_map(dev->of_node, 1);
	if (irq <= 0) {
		dev_err(dev, "cannot map MSI interrupt\n");
		return -ENODEV;
	}

	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	mutex_init(&msi->lock);
	msi->dev = dev;
	msi->base = pcie->base;
	msi->np = pcie->np;
	msi->target_addr = pcie->msi_target_addr;
	msi->irq = irq;
	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;

	/*
	 * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
	 * is large enough.
	 */
	BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);

	if (msi->legacy) {
		msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
		msi->legacy_shift = 24;
	} else {
		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
		msi->nr = BRCM_INT_PCI_MSI_NR;
		msi->legacy_shift = 0;
	}

	ret = brcm_allocate_domains(msi);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);

	brcm_msi_set_regs(msi);
	pcie->msi = msi;

	return 0;
}

/* The controller is capable of serving in both RC and EP roles */
static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);

	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
}

static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
{
	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);

	return dla && plu;
}

static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
				       unsigned int devfn, int where)
{
	struct brcm_pcie *pcie = bus->sysdata;
	void __iomem *base = pcie->base;
	int idx;

	/* Accesses to the RC go right to the RC registers if !devfn */
	if (pci_is_root_bus(bus))
		return devfn ? NULL : base + PCIE_ECAM_REG(where);

	/* An access to our HW w/o link-up will cause a CPU Abort */
	if (!brcm_pcie_link_up(pcie))
		return NULL;

	/* For devices, write to the config space index register */
	idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
	writel(idx, base + IDX_ADDR(pcie));
	return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}

static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct brcm_pcie *pcie = bus->sysdata;
	void __iomem *base = pcie->base;
	int idx;

	/* Accesses to the RC go right to the RC registers if !devfn */
	if (pci_is_root_bus(bus))
		return devfn ? NULL : base + PCIE_ECAM_REG(where);

	/* An access to our HW w/o link-up will cause a CPU Abort */
	if (!brcm_pcie_link_up(pcie))
		return NULL;

	/* For devices, write to the config space index register */
	idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
	writel(idx, base + IDX_ADDR(pcie));
	return base + DATA_ADDR(pcie);
}
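
/*
 * The two map_bus variants above differ in where the config-space offset
 * goes: brcm_pcie_map_bus() programs only bus/devfn into IDX_ADDR and adds
 * 'where' inside a memory-mapped window at DATA_ADDR, permitting sub-word
 * accesses, while the 7425 flavour folds 'where' into the index and
 * funnels everything through a single 32-bit data register -- hence its
 * pairing with pci_generic_config_read32/write32 in brcm7425_pcie_ops
 * below.
 */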

static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
	int ret = 0;

	if (pcie->bridge_reset) {
		if (val)
			ret = reset_control_assert(pcie->bridge_reset);
		else
			ret = reset_control_deassert(pcie->bridge_reset);

		if (ret)
			dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
				val ? "assert" : "deassert", ret);

		return ret;
	}

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	tmp = (tmp & ~mask) | ((val << shift) & mask);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));

	return ret;
}

static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
	u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	tmp = (tmp & ~mask) | ((val << shift) & mask);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));

	return 0;
}

static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
	int ret;

	if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
		return -EINVAL;

	if (val)
		ret = reset_control_assert(pcie->perst_reset);
	else
		ret = reset_control_deassert(pcie->perst_reset);

	if (ret)
		dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
			val ? "assert" : "deassert", ret);
	return ret;
}
"assert" : "deassert", ret); 810 return ret; 811 } 812 813 static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val) 814 { 815 u32 tmp; 816 817 /* Perst bit has moved and assert value is 0 */ 818 tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL); 819 u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK); 820 writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL); 821 822 return 0; 823 } 824 825 static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) 826 { 827 u32 tmp; 828 829 tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); 830 u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK); 831 writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); 832 833 return 0; 834 } 835 836 static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie) 837 { 838 static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030, 839 0x5030, 0x0007 }; 840 static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e }; 841 int ret, i; 842 u32 tmp; 843 844 /* Allow a 54MHz (xosc) refclk source */ 845 ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600); 846 if (ret < 0) 847 return ret; 848 849 for (i = 0; i < ARRAY_SIZE(regs); i++) { 850 ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]); 851 if (ret < 0) 852 return ret; 853 } 854 855 usleep_range(100, 200); 856 857 /* 858 * Set L1SS sub-state timers to avoid lengthy state transitions, 859 * PM clock period is 18.52ns (1/54MHz, round down). 860 */ 861 tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15); 862 tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK; 863 tmp |= 0x12; 864 writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15); 865 866 return 0; 867 } 868 869 static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size, 870 u64 cpu_addr, u64 pci_offset) 871 { 872 b->size = size; 873 b->cpu_addr = cpu_addr; 874 b->pci_offset = pci_offset; 875 (*count)++; 876 } 877 878 static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, 879 struct inbound_win inbound_wins[]) 880 { 881 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); 882 u64 pci_offset, cpu_addr, size = 0, tot_size = 0; 883 struct resource_entry *entry; 884 struct device *dev = pcie->dev; 885 u64 lowest_pcie_addr = ~(u64)0; 886 int ret, i = 0; 887 u8 n = 0; 888 889 /* 890 * The HW registers (and PCIe) use order-1 numbering for BARs. As such, 891 * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1]. 892 */ 893 struct inbound_win *b_begin = &inbound_wins[1]; 894 struct inbound_win *b = b_begin; 895 896 /* 897 * STB chips beside 7712 disable the first inbound window default. 898 * Rather being mapped to system memory it is mapped to the 899 * internal registers of the SoC. This feature is deprecated, has 900 * security considerations, and is not implemented in our modern 901 * SoCs. 902 */ 903 if (pcie->cfg->soc_base != BCM7712) 904 add_inbound_win(b++, &n, 0, 0, 0); 905 906 resource_list_for_each_entry(entry, &bridge->dma_ranges) { 907 u64 pcie_start = entry->res->start - entry->offset; 908 u64 cpu_start = entry->res->start; 909 910 size = resource_size(entry->res); 911 tot_size += size; 912 if (pcie_start < lowest_pcie_addr) 913 lowest_pcie_addr = pcie_start; 914 /* 915 * 7712 and newer chips may have many BARs, with each 916 * offering a non-overlapping viewport to system memory. 917 * That being said, each BARs size must still be a power of 918 * two. 

	/*
	 * 7712 and newer chips do not have an internal memory mapping system
	 * that enables multiple memory controllers. As such, we can return
	 * now without doing any special configuration.
	 */
	if (pcie->cfg->soc_base == BCM7712)
		return n;

	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
						  PCIE_BRCM_MAX_MEMC);
	if (ret <= 0) {
		/* Make an educated guess */
		pcie->num_memc = 1;
		pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
	} else {
		pcie->num_memc = ret;
	}

	/* Each memc is viewed through a "port" that is a power of 2 */
	for (i = 0, size = 0; i < pcie->num_memc; i++)
		size += pcie->memc_size[i];

	/* Our HW mandates that the window size must be a power of 2 */
	size = 1ULL << fls64(size - 1);

	/*
	 * For STB chips, the BAR2 cpu_addr is hardwired to the start
	 * of system memory, so we set it to 0.
	 */
	cpu_addr = 0;
	pci_offset = lowest_pcie_addr;

	/*
	 * We validate the inbound memory view even though we should trust
	 * whatever the device-tree provides. This is because of an HW issue
	 * on early Raspberry Pi 4 revisions (bcm2711). It turns out its
	 * firmware has to dynamically edit dma-ranges due to a bug on the
	 * PCIe controller integration, which prohibits any access above the
	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
	 * in check, avoiding hard-to-debug device-tree related issues in the
	 * future:
	 *
	 * The PCIe host controller by design must set the inbound viewport to
	 * be a contiguous arrangement of all of the system's memory. In
	 * addition, its size must be a power of two. To further complicate
	 * matters, the viewport must start on a pcie-address that is aligned
	 * on a multiple of its size. If a portion of the viewport does not
	 * represent system memory -- e.g. 3GB of memory requires a 4GB
	 * viewport -- we can map the outbound memory in or after 3GB and even
	 * though the viewport will overlap the outbound memory the controller
	 * will know to send outbound memory downstream and everything else
	 * upstream.
	 *
	 * For example:
	 *
	 * - The best-case scenario, memory up to 3GB, is to place the inbound
	 *   region in the first 4GB of pcie-space, as some legacy devices can
	 *   only address 32 bits. We would also like to put the MSI under 4GB
	 *   as well, since some devices require a 32-bit MSI target address.
	 *
	 * - If the system memory is 4GB or larger we cannot start the inbound
	 *   region at location 0 (since we have to allow some space for
	 *   outbound memory @ 3GB). So instead it will start at the 1x
	 *   multiple of its size.
	 */
	if (!size || (pci_offset & (size - 1)) ||
	    (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
		dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
			size, pci_offset);
		return -EINVAL;
	}

	/* Enable inbound window 2, the main inbound window for STB chips */
	add_inbound_win(b++, &n, size, cpu_addr, pci_offset);

	/*
	 * Disable inbound window 3. On some chips it presents the same
	 * window as #2, but the data appears in a settable endianness.
	 */
	add_inbound_win(b++, &n, 0, 0, 0);

	return n;
}
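
/*
 * Illustrative numbers for the STB path above (assumed values, purely for
 * illustration): a single dma-ranges entry covering 3GB of memory at PCIe
 * address 0 gives tot_size = 3GB, which rounds up to a 4GB window;
 * pci_offset = 0 is then size-aligned and not inside (2GB, 4GB), so the
 * validation passes. Two 1GB memory controllers would round to a 2GB
 * window instead.
 */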

static u32 brcm_bar_reg_offset(int bar)
{
	if (bar <= 3)
		return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
	else
		return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
}

static u32 brcm_ubus_reg_offset(int bar)
{
	if (bar <= 3)
		return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
	else
		return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
}

static void set_inbound_win_registers(struct brcm_pcie *pcie,
				      const struct inbound_win *inbound_wins,
				      u8 num_inbound_wins)
{
	void __iomem *base = pcie->base;
	int i;

	for (i = 1; i <= num_inbound_wins; i++) {
		u64 pci_offset = inbound_wins[i].pci_offset;
		u64 cpu_addr = inbound_wins[i].cpu_addr;
		u64 size = inbound_wins[i].size;
		u32 reg_offset = brcm_bar_reg_offset(i);
		u32 tmp = lower_32_bits(pci_offset);

		u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
				  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);

		/* Write low */
		writel_relaxed(tmp, base + reg_offset);
		/* Write high */
		writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);

		/*
		 * Most STB chips:
		 *     Do nothing.
		 * 7712:
		 *     All of their BARs need to be set.
		 */
		if (pcie->cfg->soc_base == BCM7712) {
			/* BUS remap register settings */
			reg_offset = brcm_ubus_reg_offset(i);
			tmp = lower_32_bits(cpu_addr) & ~0xfff;
			tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
			writel_relaxed(tmp, base + reg_offset);
			tmp = upper_32_bits(cpu_addr);
			writel_relaxed(tmp, base + reg_offset + 4);
		}
	}
}

static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
	void __iomem *base = pcie->base;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap;
	u8 num_out_wins = 0;
	int num_inbound_wins = 0;
	int memc, ret;

	/* Reset the bridge */
	ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
	if (ret)
		return ret;

	/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
	if (pcie->cfg->soc_base == BCM2711) {
		ret = pcie->cfg->perst_set(pcie, 1);
		if (ret) {
			pcie->cfg->bridge_sw_init_set(pcie, 0);
			return ret;
		}
	}

	usleep_range(100, 200);

	/* Take the bridge out of reset */
	ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
	if (ret)
		return ret;

	tmp = readl(base + HARD_DEBUG(pcie));
	if (is_bmips(pcie))
		tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	else
		tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + HARD_DEBUG(pcie));
	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * SCB_MAX_BURST_SIZE is a two-bit field. For GENERIC chips it
	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
	 */
	if (is_bmips(pcie))
		burst = 0x1; /* 256 bytes */
	else if (pcie->cfg->soc_base == BCM2711)
		burst = 0x0; /* 128 bytes */
	else if (pcie->cfg->soc_base == BCM7278)
		burst = 0x3; /* 512 bytes */
	else
		burst = 0x2; /* 512 bytes */

	/*
	 * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN,
	 * RCB_MPS_MODE, RCB_64B_MODE
	 */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
	if (num_inbound_wins < 0)
		return num_inbound_wins;

	set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);

	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
		return -EINVAL;
	}

	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	for (memc = 0; memc < pcie->num_memc; memc++) {
		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;

		if (memc == 0)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
		else if (memc == 1)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
		else if (memc == 2)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
	}
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (inbound_wins[2].pci_offset >= SZ_4G ||
	    (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* Don't advertise L0s capability if 'aspm-no-l0s' */
	aspm_support = PCIE_LINK_STATE_L1;
	if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
		aspm_support |= PCIE_LINK_STATE_L0S;
	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
	u32p_replace_bits(&tmp, aspm_support,
			  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	/* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
	num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
	num_lanes = 0;

	/*
	 * Use the hardware-negotiated Max Link Width value by default. If the
	 * "num-lanes" DT property is present, assume that the chip's default
	 * link width capability information is incorrect/undesired and use
	 * the specified value instead.
	 */
	if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
	    num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
		u32p_replace_bits(&tmp, num_lanes,
				  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
		writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
		tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
		u32p_replace_bits(&tmp, 1,
				  PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
		writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
	}

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-to-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	bridge = pci_host_bridge_from_priv(pcie);
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		if (is_bmips(pcie)) {
			u64 start = res->start;
			unsigned int j, nwins = resource_size(res) / SZ_128M;

			/* bmips PCIe outbound windows have a 128MB max size */
			if (nwins > BRCM_NUM_PCIE_OUT_WINS)
				nwins = BRCM_NUM_PCIE_OUT_WINS;
			for (j = 0; j < nwins; j++, start += SZ_128M)
				brcm_pcie_set_outbound_win(pcie, j, start,
							   start - entry->offset,
							   SZ_128M);
			break;
		}
		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/* PCIe->SCB endian mode for inbound window */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
			  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	if (pcie->cfg->post_setup) {
		ret = pcie->cfg->post_setup(pcie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * This extends the timeout period for an access to an internal bus. This
 * access timeout may occur during L1SS sleep periods, even without the
 * presence of a PCIe access.
 */
static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
{
	/* The TIMEOUT register is two registers before RGR1_SW_INIT_1 */
	const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
	u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */

	/* The 7712 does not have this (RGR1) timer */
	if (pcie->cfg->soc_base == BCM7712)
		return;

	/* Each unit in the timeout register is 1/216,000,000 seconds */
	writel(216 * timeout_us, pcie->base + REG_OFFSET);
}
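
/*
 * Sanity check of the arithmetic above: at 216MHz one unit is ~4.63ns, so
 * the 4,000,000us setting becomes 216 * 4,000,000 = 864,000,000 units,
 * which comfortably fits in the 32-bit register.
 */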

static void brcm_config_clkreq(struct brcm_pcie *pcie)
{
	static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n";
	const char *mode = "default";
	u32 clkreq_cntl;
	int ret, tmp;

	ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode);
	if (ret && ret != -EINVAL) {
		dev_err(pcie->dev, err_msg);
		mode = "safe";
	}

	/* Start out assuming safe mode (both mode bits cleared) */
	clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
	clkreq_cntl &= ~PCIE_CLKREQ_MASK;

	if (strcmp(mode, "no-l1ss") == 0) {
		/*
		 * "no-l1ss" -- Provides Clock Power Management, L0s, and
		 * L1, but cannot provide L1 substate (L1SS) power
		 * savings. If the downstream device connected to the RC is
		 * L1SS capable AND the OS enables L1SS, all PCIe traffic
		 * may abruptly halt, potentially hanging the system.
		 */
		clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
		/*
		 * We want to un-advertise L1 substates because if the OS
		 * tries to configure the controller into using L1 substate
		 * power savings it may fail or hang when the RC HW is in
		 * "no-l1ss" mode.
		 */
		tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
		u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
		writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);

	} else if (strcmp(mode, "default") == 0) {
		/*
		 * "default" -- Provides L0s, L1, and L1SS, but not
		 * compliant to provide Clock Power Management;
		 * specifically, may not be able to meet the Tclron max
		 * timing of 400ns as specified in "Dynamic Clock Control",
		 * section 3.2.5.2.2 of the PCIe spec. This situation is
		 * atypical and should happen only with older devices.
		 */
		clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
		brcm_extend_rbus_timeout(pcie);

	} else {
		/*
		 * "safe" -- No power savings; refclk is driven by RC
		 * unconditionally.
		 */
		if (strcmp(mode, "safe") != 0)
			dev_err(pcie->dev, err_msg);
		mode = "safe";
	}
	writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));

	dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}
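
/*
 * Summary of the accepted 'brcm,clkreq-mode' DT strings handled above:
 *
 *   "safe"    - no CLKREQ#-based power savings; the refclk is always
 *               driven by the RC.
 *   "no-l1ss" - Clock Power Management, L0s, and L1, with L1 substates
 *               un-advertised.
 *   "default" - L0s, L1, and L1SS, but no Clock Power Management.
 *
 * Any other string logs an error and falls back to "safe".
 */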

static int brcm_pcie_start_link(struct brcm_pcie *pcie)
{
	struct device *dev = pcie->dev;
	void __iomem *base = pcie->base;
	u16 nlw, cls, lnksta;
	bool ssc_good = false;
	int ret, i;

	/* Limit the generation if specified */
	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Deassert the fundamental reset */
	ret = pcie->cfg->perst_set(pcie, 0);
	if (ret)
		return ret;

	msleep(PCIE_RESET_CONFIG_WAIT_MS);

	/*
	 * Give the RC/EP even more time to wake up, before trying to
	 * configure RC. Intermittently check status for link-up, up to a
	 * total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	brcm_config_clkreq(pcie);

	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			dev_err(dev, "failed attempt to enter SSC mode\n");
	}

	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	return 0;
}
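
/*
 * For reference, the dev_info() above yields a log line like the following
 * (device name and link parameters are illustrative, not from the source):
 *
 *   brcm-pcie fd500000.pcie: link up, 5.0 GT/s PCIe x1 (SSC)
 */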

static const char * const supplies[] = {
	"vpcie3v3",
	"vpcie3v3aux",
	"vpcie12v",
};

static void *alloc_subdev_regulators(struct device *dev)
{
	const size_t size = sizeof(struct subdev_regulators) +
			    sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
	struct subdev_regulators *sr;
	int i;

	sr = devm_kzalloc(dev, size, GFP_KERNEL);
	if (sr) {
		sr->num_supplies = ARRAY_SIZE(supplies);
		for (i = 0; i < ARRAY_SIZE(supplies); i++)
			sr->supplies[i].supply = supplies[i];
	}

	return sr;
}

static int brcm_pcie_add_bus(struct pci_bus *bus)
{
	struct brcm_pcie *pcie = bus->sysdata;
	struct device *dev = &bus->dev;
	struct subdev_regulators *sr;
	int ret;

	if (!bus->parent || !pci_is_root_bus(bus->parent))
		return 0;

	if (dev->of_node) {
		sr = alloc_subdev_regulators(dev);
		if (!sr) {
			dev_info(dev, "Can't allocate regulators for downstream device\n");
			goto no_regulators;
		}

		pcie->sr = sr;

		ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
		if (ret) {
			dev_info(dev, "Did not get regulators, err=%d\n", ret);
			pcie->sr = NULL;
			goto no_regulators;
		}

		ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
		if (ret) {
			dev_err(dev, "Can't enable regulators for downstream device\n");
			regulator_bulk_free(sr->num_supplies, sr->supplies);
			pcie->sr = NULL;
		}
	}

no_regulators:
	brcm_pcie_start_link(pcie);
	return 0;
}

static void brcm_pcie_remove_bus(struct pci_bus *bus)
{
	struct brcm_pcie *pcie = bus->sysdata;
	struct subdev_regulators *sr = pcie->sr;
	struct device *dev = &bus->dev;

	if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
		return;

	if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
		dev_err(dev, "Failed to disable regulators for downstream device\n");
	regulator_bulk_free(sr->num_supplies, sr->supplies);
	pcie->sr = NULL;
}

/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int l23, i;
	u32 tmp;

	/* Assert request for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Wait up to 36 msec for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
	for (i = 0; i < 15 && !l23; i++) {
		usleep_range(2000, 2400);
		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
				tmp);
	}

	if (!l23)
		dev_err(pcie->dev, "failed to enter low-power link state\n");
}

static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
{
	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,
	};
	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,
	};
	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
	u32 tmp, combined_mask = 0;
	u32 val;
	void __iomem *base = pcie->base;
	int i, ret;

	for (i = beg; i != end; start ? i++ : i--) {
		val = start ? BIT_MASK(shifts[i]) : 0;
		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		tmp = (tmp & ~masks[i]) | (val & masks[i]);
		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		usleep_range(50, 200);
		combined_mask |= masks[i];
	}

	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
	val = start ? combined_mask : 0;

	ret = (tmp & combined_mask) == val ? 0 : -EIO;
	if (ret)
		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));

	return ret;
}

static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
	return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}

static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
	return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}

static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	u32 tmp;
	int ret;

	if (brcm_pcie_link_up(pcie))
		brcm_pcie_enter_l23(pcie);
	/* Assert fundamental reset */
	ret = pcie->cfg->perst_set(pcie, 1);
	if (ret)
		return ret;

	/* Deassert request for L23 in case it was asserted */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Turn off SerDes */
	tmp = readl(base + HARD_DEBUG(pcie));
	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + HARD_DEBUG(pcie));

	if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
		/* Shutdown PCIe bridge */
		ret = pcie->cfg->bridge_sw_init_set(pcie, 1);

	return ret;
}

static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
{
	bool *ret = data;

	if (device_may_wakeup(&dev->dev)) {
		*ret = true;
		dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
	}
	return (int)*ret;
}

static int brcm_pcie_suspend_noirq(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret, rret;

	ret = brcm_pcie_turn_off(pcie);
	if (ret)
		return ret;

	/*
	 * If brcm_phy_stop() returns an error, just dev_err(). If we
	 * return the error it will cause the suspend to fail and this is a
	 * forgivable offense that will probably be erased on resume.
	 */
	if (brcm_phy_stop(pcie))
		dev_err(dev, "Could not stop phy for suspend\n");

	ret = reset_control_rearm(pcie->rescal);
	if (ret) {
		dev_err(dev, "Could not rearm rescal reset\n");
		return ret;
	}

	if (pcie->sr) {
		/*
		 * Now turn off the regulators, but if at least one
		 * downstream device is enabled as a wake-up source, do not
		 * turn off regulators.
		 */
		pcie->ep_wakeup_capable = false;
		pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
			     &pcie->ep_wakeup_capable);
		if (!pcie->ep_wakeup_capable) {
			ret = regulator_bulk_disable(pcie->sr->num_supplies,
						     pcie->sr->supplies);
			if (ret) {
				dev_err(dev, "Could not turn off regulators\n");
				rret = reset_control_reset(pcie->rescal);
				if (rret)
					dev_err(dev, "failed to reset 'rescal' controller, err=%d\n",
						rret);
				return ret;
			}
		}
	}
	clk_disable_unprepare(pcie->clk);

	return 0;
}

static int brcm_pcie_resume_noirq(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	void __iomem *base;
	u32 tmp;
	int ret, rret;

	base = pcie->base;
	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return ret;

	ret = reset_control_reset(pcie->rescal);
	if (ret)
		goto err_disable_clk;

	ret = brcm_phy_start(pcie);
	if (ret)
		goto err_reset;

	/* Take bridge out of reset so we can access the SERDES reg */
	pcie->cfg->bridge_sw_init_set(pcie, 0);

	/* SERDES_IDDQ = 0 */
	tmp = readl(base + HARD_DEBUG(pcie));
	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + HARD_DEBUG(pcie));

	/* Wait for SerDes to be stable */
	udelay(100);

	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto err_reset;

	if (pcie->sr) {
		if (pcie->ep_wakeup_capable) {
			/*
			 * We are resuming from a suspend. In the suspend we
			 * did not disable the power supplies, so there is
			 * no need to enable them (and falsely increase their
			 * usage count).
			 */
			pcie->ep_wakeup_capable = false;
		} else {
			ret = regulator_bulk_enable(pcie->sr->num_supplies,
						    pcie->sr->supplies);
			if (ret) {
				dev_err(dev, "Could not turn on regulators\n");
				goto err_reset;
			}
		}
	}

	ret = brcm_pcie_start_link(pcie);
	if (ret)
		goto err_regulator;

	if (pcie->msi)
		brcm_msi_set_regs(pcie->msi);

	return 0;

err_regulator:
	if (pcie->sr)
		regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
	rret = reset_control_rearm(pcie->rescal);
	if (rret)
		dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
	clk_disable_unprepare(pcie->clk);
	return ret;
}

static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
	brcm_msi_remove(pcie);
	brcm_pcie_turn_off(pcie);
	if (brcm_phy_stop(pcie))
		dev_err(pcie->dev, "Could not stop phy\n");
	if (reset_control_rearm(pcie->rescal))
		dev_err(pcie->dev, "Could not rearm rescal reset\n");
	clk_disable_unprepare(pcie->clk);
}

static void brcm_pcie_remove(struct platform_device *pdev)
{
	struct brcm_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	__brcm_pcie_remove(pcie);
}

static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1]	= 0x9210,
	[EXT_CFG_INDEX]		= 0x9000,
	[EXT_CFG_DATA]		= 0x8000,
	[PCIE_HARD_DEBUG]	= 0x4204,
	[PCIE_INTR2_CPU_BASE]	= 0x4300,
};

static const int pcie_offsets_bcm7278[] = {
	[RGR1_SW_INIT_1]	= 0xc010,
	[EXT_CFG_INDEX]		= 0x9000,
	[EXT_CFG_DATA]		= 0x8000,
	[PCIE_HARD_DEBUG]	= 0x4204,
	[PCIE_INTR2_CPU_BASE]	= 0x4300,
};

static const int pcie_offsets_bcm7425[] = {
	[RGR1_SW_INIT_1]	= 0x8010,
	[EXT_CFG_INDEX]		= 0x8300,
	[EXT_CFG_DATA]		= 0x8304,
	[PCIE_HARD_DEBUG]	= 0x4204,
	[PCIE_INTR2_CPU_BASE]	= 0x4300,
};

static const int pcie_offsets_bcm7712[] = {
	[RGR1_SW_INIT_1]	= 0x9210,
	[EXT_CFG_INDEX]		= 0x9000,
	[EXT_CFG_DATA]		= 0x8000,
	[PCIE_HARD_DEBUG]	= 0x4304,
	[PCIE_INTR2_CPU_BASE]	= 0x4400,
};
static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1] = 0x9210,
	[EXT_CFG_INDEX] = 0x9000,
	[EXT_CFG_DATA] = 0x8000,
	[PCIE_HARD_DEBUG] = 0x4204,
	[PCIE_INTR2_CPU_BASE] = 0x4300,
};

static const int pcie_offsets_bcm7278[] = {
	[RGR1_SW_INIT_1] = 0xc010,
	[EXT_CFG_INDEX] = 0x9000,
	[EXT_CFG_DATA] = 0x8000,
	[PCIE_HARD_DEBUG] = 0x4204,
	[PCIE_INTR2_CPU_BASE] = 0x4300,
};

static const int pcie_offsets_bcm7425[] = {
	[RGR1_SW_INIT_1] = 0x8010,
	[EXT_CFG_INDEX] = 0x8300,
	[EXT_CFG_DATA] = 0x8304,
	[PCIE_HARD_DEBUG] = 0x4204,
	[PCIE_INTR2_CPU_BASE] = 0x4300,
};

static const int pcie_offsets_bcm7712[] = {
	[RGR1_SW_INIT_1] = 0x9210,
	[EXT_CFG_INDEX] = 0x9000,
	[EXT_CFG_DATA] = 0x8000,
	[PCIE_HARD_DEBUG] = 0x4304,
	[PCIE_INTR2_CPU_BASE] = 0x4400,
};

static const struct pcie_cfg_data generic_cfg = {
	.offsets = pcie_offsets,
	.soc_base = GENERIC,
	.perst_set = brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm2711_cfg = {
	.offsets = pcie_offsets,
	.soc_base = BCM2711,
	.perst_set = brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm2712_cfg = {
	.offsets = pcie_offsets_bcm7712,
	.soc_base = BCM7712,
	.perst_set = brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.post_setup = brcm_pcie_post_setup_bcm2712,
	.quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
	.num_inbound_wins = 10,
};

static const struct pcie_cfg_data bcm4908_cfg = {
	.offsets = pcie_offsets,
	.soc_base = BCM4908,
	.perst_set = brcm_pcie_perst_set_4908,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7278_cfg = {
	.offsets = pcie_offsets_bcm7278,
	.soc_base = BCM7278,
	.perst_set = brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7425_cfg = {
	.offsets = pcie_offsets_bcm7425,
	.soc_base = BCM7425,
	.perst_set = brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7435_cfg = {
	.offsets = pcie_offsets,
	.soc_base = BCM7435,
	.perst_set = brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7216_cfg = {
	.offsets = pcie_offsets_bcm7278,
	.soc_base = BCM7278,
	.perst_set = brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
	.has_phy = true,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7712_cfg = {
	.offsets = pcie_offsets_bcm7712,
	.soc_base = BCM7712,
	.perst_set = brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 10,
};

static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
	{ .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
	{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
	{ .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
	{},
};

static struct pci_ops brcm_pcie_ops = {
	.map_bus = brcm_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.add_bus = brcm_pcie_add_bus,
	.remove_bus = brcm_pcie_remove_bus,
};

static struct pci_ops brcm7425_pcie_ops = {
	.map_bus = brcm7425_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
	.add_bus = brcm_pcie_add_bus,
	.remove_bus = brcm_pcie_remove_bus,
};
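/*
 * Bring-up order mirrors what the resume path expects: clock, optional
 * 'swinit' and 'rescal' resets, PHY, then brcm_pcie_setup().  The internal
 * MSI controller is only used when this node is its own 'msi-parent', and
 * BCM7425 gets the 32-bit-only config accessors.
 */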
static int brcm_pcie_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pci_host_bridge *bridge;
	const struct pcie_cfg_data *data;
	struct brcm_pcie *pcie;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	data = of_device_get_match_data(&pdev->dev);
	if (!data) {
		dev_err(&pdev->dev, "failed to look up compatible string\n");
		return -EINVAL;
	}

	pcie = pci_host_bridge_priv(bridge);
	pcie->dev = &pdev->dev;
	pcie->np = np;
	pcie->cfg = data;

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	ret = of_pci_get_max_link_speed(np);
	pcie->gen = (ret < 0) ? 0 : ret;

	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");

	pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
	if (IS_ERR(pcie->rescal))
		return PTR_ERR(pcie->rescal);

	pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
	if (IS_ERR(pcie->perst_reset))
		return PTR_ERR(pcie->perst_reset);

	pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
	if (IS_ERR(pcie->bridge_reset))
		return PTR_ERR(pcie->bridge_reset);

	pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
	if (IS_ERR(pcie->swinit_reset))
		return PTR_ERR(pcie->swinit_reset);

	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");

	pcie->cfg->bridge_sw_init_set(pcie, 0);

	if (pcie->swinit_reset) {
		ret = reset_control_assert(pcie->swinit_reset);
		if (ret) {
			clk_disable_unprepare(pcie->clk);
			return dev_err_probe(&pdev->dev, ret,
					     "could not assert reset 'swinit'\n");
		}

		/* HW team recommends 1us for proper sync and propagation of reset */
		udelay(1);

		ret = reset_control_deassert(pcie->swinit_reset);
		if (ret) {
			clk_disable_unprepare(pcie->clk);
			return dev_err_probe(&pdev->dev, ret,
					     "could not de-assert reset 'swinit'\n");
		}
	}

	ret = reset_control_reset(pcie->rescal);
	if (ret) {
		clk_disable_unprepare(pcie->clk);
		return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
	}

	ret = brcm_phy_start(pcie);
	if (ret) {
		reset_control_rearm(pcie->rescal);
		clk_disable_unprepare(pcie->clk);
		return ret;
	}

	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto fail;

	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
	if (pcie->cfg->soc_base == BCM4908 &&
	    pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
		dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
		ret = -ENODEV;
		goto fail;
	}

	if (pci_msi_enabled()) {
		struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);

		if (msi_np == pcie->np)
			ret = brcm_pcie_enable_msi(pcie);

		of_node_put(msi_np);

		if (ret) {
			dev_err(pcie->dev, "probe of internal MSI failed\n");
			goto fail;
		}
	}

	bridge->ops = pcie->cfg->soc_base == BCM7425 ?
		      &brcm7425_pcie_ops : &brcm_pcie_ops;
	bridge->sysdata = pcie;

	platform_set_drvdata(pdev, pcie);

	ret = pci_host_probe(bridge);
	if (!ret && !brcm_pcie_link_up(pcie))
		ret = -ENODEV;

	if (ret) {
		brcm_pcie_remove(pdev);
		return ret;
	}

	return 0;

fail:
	__brcm_pcie_remove(pcie);

	return ret;
}

MODULE_DEVICE_TABLE(of, brcm_pcie_match);
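/*
 * Suspend/resume run in the noirq phase, i.e. after downstream devices
 * have suspended and before they resume, so the link (and, when no
 * endpoint is wake-up capable, the regulators) can be safely taken down.
 */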
static const struct dev_pm_ops brcm_pcie_pm_ops = {
	.suspend_noirq = brcm_pcie_suspend_noirq,
	.resume_noirq = brcm_pcie_resume_noirq,
};

static struct platform_driver brcm_pcie_driver = {
	.probe = brcm_pcie_probe,
	.remove = brcm_pcie_remove,
	.driver = {
		.name = "brcm-pcie",
		.of_match_table = brcm_pcie_match,
		.pm = &brcm_pcie_pm_ops,
	},
};
module_platform_driver(brcm_pcie_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
MODULE_SOFTDEP("pre: irq_bcm2712_mip");