// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <dt-bindings/clock/en7523-clk.h>

#define REG_PCI_CONTROL			0x88
#define REG_PCI_CONTROL_PERSTOUT	BIT(29)
#define REG_PCI_CONTROL_PERSTOUT1	BIT(26)
#define REG_PCI_CONTROL_REFCLK_EN0	BIT(23)
#define REG_PCI_CONTROL_REFCLK_EN1	BIT(22)
#define REG_PCI_CONTROL_PERSTOUT2	BIT(16)
#define REG_GSW_CLK_DIV_SEL		0x1b4
#define REG_EMI_CLK_DIV_SEL		0x1b8
#define REG_BUS_CLK_DIV_SEL		0x1bc
#define REG_SPI_CLK_DIV_SEL		0x1c4
#define REG_SPI_CLK_FREQ_SEL		0x1c8
#define REG_NPU_CLK_DIV_SEL		0x1fc
#define REG_CRYPTO_CLKSRC		0x200
#define REG_RESET_CONTROL2		0x830
#define REG_RESET2_CONTROL_PCIE2	BIT(27)
#define REG_RESET_CONTROL1		0x834
#define REG_RESET_CONTROL_PCIEHB	BIT(29)
#define REG_RESET_CONTROL_PCIE1		BIT(27)
#define REG_RESET_CONTROL_PCIE2		BIT(26)
/* EN7581 */
#define REG_PCIE0_MEM			0x00
#define REG_PCIE0_MEM_MASK		0x04
#define REG_PCIE1_MEM			0x08
#define REG_PCIE1_MEM_MASK		0x0c
#define REG_PCIE2_MEM			0x10
#define REG_PCIE2_MEM_MASK		0x14
#define REG_PCIE_RESET_OPEN_DRAIN	0x018c
#define REG_PCIE_RESET_OPEN_DRAIN_MASK	GENMASK(2, 0)
#define REG_NP_SCU_PCIC			0x88
#define REG_NP_SCU_SSTR			0x9c
#define REG_PCIE_XSI0_SEL_MASK		GENMASK(14, 13)
#define REG_PCIE_XSI1_SEL_MASK		GENMASK(12, 11)

struct en_clk_desc {
	int id;
	const char *name;
	u32 base_reg;
	u8 base_bits;
	u8 base_shift;
	union {
		const unsigned int *base_values;
		unsigned int base_value;
	};
	size_t n_base_values;

	u16 div_reg;
	u8 div_bits;
	u8 div_shift;
	u16 div_val0;
	u8 div_step;
};

struct en_clk_gate {
	void __iomem *base;
	struct clk_hw hw;
};

struct en_clk_soc_data {
	const struct clk_ops pcie_ops;
	int (*hw_init)(struct platform_device *pdev, void __iomem *base,
		       void __iomem *np_base);
};

static const u32 gsw_base[] = { 400000000, 500000000 };
static const u32 emi_base[] = { 333000000, 400000000 };
static const u32 bus_base[] = { 500000000, 540000000 };
static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };

static const struct en_clk_desc en7523_base_clks[] = {
	{
		.id = EN7523_CLK_GSW,
		.name = "gsw",

		.base_reg = REG_GSW_CLK_DIV_SEL,
		.base_bits = 1,
		.base_shift = 8,
		.base_values = gsw_base,
		.n_base_values = ARRAY_SIZE(gsw_base),

		.div_bits = 3,
		.div_shift = 0,
		.div_step = 1,
	}, {
		.id = EN7523_CLK_EMI,
		.name = "emi",

		.base_reg = REG_EMI_CLK_DIV_SEL,
		.base_bits = 1,
		.base_shift = 8,
		.base_values = emi_base,
		.n_base_values = ARRAY_SIZE(emi_base),

		.div_bits = 3,
		.div_shift = 0,
		.div_step = 1,
	}, {
		.id = EN7523_CLK_BUS,
		.name = "bus",

		.base_reg = REG_BUS_CLK_DIV_SEL,
		.base_bits = 1,
		.base_shift = 8,
		.base_values = bus_base,
		.n_base_values = ARRAY_SIZE(bus_base),

		.div_bits = 3,
		.div_shift = 0,
		.div_step = 1,
	}, {
		.id = EN7523_CLK_SLIC,
		.name = "slic",

		.base_reg = REG_SPI_CLK_FREQ_SEL,
		.base_bits = 1,
		.base_shift = 0,
		.base_values = slic_base,
		.n_base_values = ARRAY_SIZE(slic_base),

		.div_reg = REG_SPI_CLK_DIV_SEL,
		.div_bits = 5,
		.div_shift = 24,
		.div_val0 = 20,
		.div_step = 2,
	}, {
		.id = EN7523_CLK_SPI,
		.name = "spi",

		.base_reg = REG_SPI_CLK_DIV_SEL,

		.base_value = 400000000,

		.div_bits = 5,
		.div_shift = 8,
		.div_val0 = 40,
		.div_step = 2,
	}, {
		.id = EN7523_CLK_NPU,
		.name = "npu",

		.base_reg = REG_NPU_CLK_DIV_SEL,
		.base_bits = 2,
		.base_shift = 8,
		.base_values = npu_base,
		.n_base_values = ARRAY_SIZE(npu_base),

		.div_bits = 3,
		.div_shift = 0,
		.div_step = 1,
	}, {
		.id = EN7523_CLK_CRYPTO,
		.name = "crypto",

		.base_reg = REG_CRYPTO_CLKSRC,
		.base_bits = 1,
		.base_shift = 8,
		.base_values = emi_base,
		.n_base_values = ARRAY_SIZE(emi_base),
	}
};

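/*
 * Each fixed clock rate is derived from two register fields: a base rate,
 * either hardwired (.base_value) or picked from a lookup table
 * (.base_values) by the selector in .base_reg, and a divider taken from
 * .div_reg (or from .base_reg when .div_reg is zero). A raw divider field
 * of 0 maps to .div_val0 when set; otherwise the divider is
 * (field + 1) * .div_step.
 */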
static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
{
	const struct en_clk_desc *desc = &en7523_base_clks[i];
	u32 val;

	if (!desc->base_bits)
		return desc->base_value;

	val = readl(base + desc->base_reg);
	val >>= desc->base_shift;
	val &= (1 << desc->base_bits) - 1;

	if (val >= desc->n_base_values)
		return 0;

	return desc->base_values[val];
}

static u32 en7523_get_div(void __iomem *base, int i)
{
	const struct en_clk_desc *desc = &en7523_base_clks[i];
	u32 reg, val;

	if (!desc->div_bits)
		return 1;

	reg = desc->div_reg ? desc->div_reg : desc->base_reg;
	val = readl(base + reg);
	val >>= desc->div_shift;
	val &= (1 << desc->div_bits) - 1;

	if (!val && desc->div_val0)
		return desc->div_val0;

	return (val + 1) * desc->div_step;
}

static int en7523_pci_is_enabled(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);

	return !!(readl(cg->base + REG_PCI_CONTROL) & REG_PCI_CONTROL_REFCLK_EN1);
}

static int en7523_pci_prepare(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	void __iomem *np_base = cg->base;
	u32 val, mask;

	/* Need to pull device low before reset */
	val = readl(np_base + REG_PCI_CONTROL);
	val &= ~(REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT);
	writel(val, np_base + REG_PCI_CONTROL);
	usleep_range(1000, 2000);

	/* Enable PCIe port 1 */
	val |= REG_PCI_CONTROL_REFCLK_EN1;
	writel(val, np_base + REG_PCI_CONTROL);
	usleep_range(1000, 2000);

	/* Reset to default */
	val = readl(np_base + REG_RESET_CONTROL1);
	mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
	       REG_RESET_CONTROL_PCIEHB;
	writel(val & ~mask, np_base + REG_RESET_CONTROL1);
	usleep_range(1000, 2000);
	writel(val | mask, np_base + REG_RESET_CONTROL1);
	msleep(100);
	writel(val & ~mask, np_base + REG_RESET_CONTROL1);
	usleep_range(5000, 10000);

	/* Release device */
	mask = REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT;
	val = readl(np_base + REG_PCI_CONTROL);
	writel(val & ~mask, np_base + REG_PCI_CONTROL);
	usleep_range(1000, 2000);
	writel(val | mask, np_base + REG_PCI_CONTROL);
	msleep(250);

	return 0;
}

static void en7523_pci_unprepare(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	void __iomem *np_base = cg->base;
	u32 val;

	val = readl(np_base + REG_PCI_CONTROL);
	val &= ~REG_PCI_CONTROL_REFCLK_EN1;
	writel(val, np_base + REG_PCI_CONTROL);
}

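/*
 * Register the PCIe reference clock gate. The SoC-specific disable and
 * unprepare callbacks are invoked before registration so the clock starts
 * out gated until the PCIe host driver prepares and enables it.
 */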
static struct clk_hw *en7523_register_pcie_clk(struct device *dev,
					       void __iomem *np_base)
{
	const struct en_clk_soc_data *soc_data = device_get_match_data(dev);
	struct clk_init_data init = {
		.name = "pcie",
		.ops = &soc_data->pcie_ops,
	};
	struct en_clk_gate *cg;

	cg = devm_kzalloc(dev, sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return NULL;

	cg->base = np_base;
	cg->hw.init = &init;

	if (init.ops->disable)
		init.ops->disable(&cg->hw);
	init.ops->unprepare(&cg->hw);

	if (clk_hw_register(dev, &cg->hw))
		return NULL;

	return &cg->hw;
}

static int en7581_pci_is_enabled(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	u32 val, mask;

	mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1;
	val = readl(cg->base + REG_PCI_CONTROL);
	return (val & mask) == mask;
}

static int en7581_pci_prepare(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	void __iomem *np_base = cg->base;
	u32 val, mask;

	mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
	       REG_RESET_CONTROL_PCIEHB;
	val = readl(np_base + REG_RESET_CONTROL1);
	writel(val & ~mask, np_base + REG_RESET_CONTROL1);
	val = readl(np_base + REG_RESET_CONTROL2);
	writel(val & ~REG_RESET2_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
	usleep_range(5000, 10000);

	return 0;
}

static int en7581_pci_enable(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	void __iomem *np_base = cg->base;
	u32 val, mask;

	mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
	       REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
	       REG_PCI_CONTROL_PERSTOUT;
	val = readl(np_base + REG_PCI_CONTROL);
	writel(val | mask, np_base + REG_PCI_CONTROL);
	msleep(250);

	return 0;
}

static void en7581_pci_unprepare(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	void __iomem *np_base = cg->base;
	u32 val, mask;

	mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 |
	       REG_RESET_CONTROL_PCIEHB;
	val = readl(np_base + REG_RESET_CONTROL1);
	writel(val | mask, np_base + REG_RESET_CONTROL1);
	mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2;
	writel(val | mask, np_base + REG_RESET_CONTROL1);
	val = readl(np_base + REG_RESET_CONTROL2);
	writel(val | REG_RESET_CONTROL_PCIE2, np_base + REG_RESET_CONTROL2);
	msleep(100);
}

static void en7581_pci_disable(struct clk_hw *hw)
{
	struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw);
	void __iomem *np_base = cg->base;
	u32 val, mask;

	mask = REG_PCI_CONTROL_REFCLK_EN0 | REG_PCI_CONTROL_REFCLK_EN1 |
	       REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT2 |
	       REG_PCI_CONTROL_PERSTOUT;
	val = readl(np_base + REG_PCI_CONTROL);
	writel(val & ~mask, np_base + REG_PCI_CONTROL);
	usleep_range(1000, 2000);
}

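/*
 * EN7581-only setup run once at probe time: clear the PCIe XSI interface
 * select fields in REG_NP_SCU_SSTR, set the two low bits of
 * REG_NP_SCU_PCIC, program the PCIe memory window base/mask registers in
 * the third MMIO region and switch the PCIe reset lines to open-drain.
 */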
static int en7581_clk_hw_init(struct platform_device *pdev,
			      void __iomem *base,
			      void __iomem *np_base)
{
	void __iomem *pb_base;
	u32 val;

	pb_base = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(pb_base))
		return PTR_ERR(pb_base);

	val = readl(np_base + REG_NP_SCU_SSTR);
	val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
	writel(val, np_base + REG_NP_SCU_SSTR);
	val = readl(np_base + REG_NP_SCU_PCIC);
	writel(val | 3, np_base + REG_NP_SCU_PCIC);

	writel(0x20000000, pb_base + REG_PCIE0_MEM);
	writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
	writel(0x24000000, pb_base + REG_PCIE1_MEM);
	writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
	writel(0x28000000, pb_base + REG_PCIE2_MEM);
	writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);

	val = readl(base + REG_PCIE_RESET_OPEN_DRAIN);
	writel(val | REG_PCIE_RESET_OPEN_DRAIN_MASK,
	       base + REG_PCIE_RESET_OPEN_DRAIN);

	return 0;
}

static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
				   void __iomem *base, void __iomem *np_base)
{
	struct clk_hw *hw;
	u32 rate;
	int i;

	for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
		const struct en_clk_desc *desc = &en7523_base_clks[i];

		rate = en7523_get_base_rate(base, i);
		rate /= en7523_get_div(base, i);

		hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
		if (IS_ERR(hw)) {
			pr_err("Failed to register clk %s: %ld\n",
			       desc->name, PTR_ERR(hw));
			continue;
		}

		clk_data->hws[desc->id] = hw;
	}

	hw = en7523_register_pcie_clk(dev, np_base);
	clk_data->hws[EN7523_CLK_PCIE] = hw;

	clk_data->num = EN7523_NUM_CLOCKS;
}

static int en7523_clk_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct en_clk_soc_data *soc_data;
	struct clk_hw_onecell_data *clk_data;
	void __iomem *base, *np_base;
	int r;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(np_base))
		return PTR_ERR(np_base);

	soc_data = device_get_match_data(&pdev->dev);
	if (soc_data->hw_init) {
		r = soc_data->hw_init(pdev, base, np_base);
		if (r)
			return r;
	}

	clk_data = devm_kzalloc(&pdev->dev,
				struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	en7523_register_clocks(&pdev->dev, clk_data, base, np_base);

	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
	if (r)
		dev_err(&pdev->dev,
			"could not register clock provider: %s: %d\n",
			pdev->name, r);

	return r;
}

static const struct en_clk_soc_data en7523_data = {
	.pcie_ops = {
		.is_enabled = en7523_pci_is_enabled,
		.prepare = en7523_pci_prepare,
		.unprepare = en7523_pci_unprepare,
	},
};

static const struct en_clk_soc_data en7581_data = {
	.pcie_ops = {
		.is_enabled = en7581_pci_is_enabled,
		.prepare = en7581_pci_prepare,
		.enable = en7581_pci_enable,
		.unprepare = en7581_pci_unprepare,
		.disable = en7581_pci_disable,
	},
	.hw_init = en7581_clk_hw_init,
};

static const struct of_device_id of_match_clk_en7523[] = {
	{ .compatible = "airoha,en7523-scu", .data = &en7523_data },
	{ .compatible = "airoha,en7581-scu", .data = &en7581_data },
	{ /* sentinel */ }
};

static struct platform_driver clk_en7523_drv = {
	.probe = en7523_clk_probe,
	.driver = {
		.name = "clk-en7523",
		.of_match_table = of_match_clk_en7523,
		.suppress_bind_attrs = true,
	},
};

static int __init clk_en7523_init(void)
{
	return platform_driver_register(&clk_en7523_drv);
}

arch_initcall(clk_en7523_init);