// SPDX-License-Identifier: GPL-2.0-only
/*
 * Rockchip PCIe PHY driver
 *
 * Copyright (C) 2016 Shawn Lin <shawn.lin@rock-chips.com>
 * Copyright (C) 2016 ROCKCHIP, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>

/*
 * The higher 16 bits of this register are used for write protection:
 * BIT(x) can only be written if BIT(x + 16) is set to 1.
 */
#define HIWORD_UPDATE(val, mask, shift) \
        ((val) << (shift) | (mask) << ((shift) + 16))

#define PHY_MAX_LANE_NUM        4
#define PHY_CFG_DATA_SHIFT      7
#define PHY_CFG_ADDR_SHIFT      1
#define PHY_CFG_DATA_MASK       0xf
#define PHY_CFG_ADDR_MASK       0x3f
#define PHY_CFG_RD_MASK         0x3ff
#define PHY_CFG_WR_ENABLE       1
#define PHY_CFG_WR_DISABLE      1
#define PHY_CFG_WR_SHIFT        0
#define PHY_CFG_WR_MASK         1
#define PHY_CFG_PLL_LOCK        0x10
#define PHY_CFG_CLK_TEST        0x10
#define PHY_CFG_CLK_SCC         0x12
#define PHY_CFG_SEPE_RATE       BIT(3)
#define PHY_CFG_PLL_100M        BIT(3)
#define PHY_PLL_LOCKED          BIT(9)
#define PHY_PLL_OUTPUT          BIT(10)
#define PHY_LANE_A_STATUS       0x30
#define PHY_LANE_B_STATUS       0x31
#define PHY_LANE_C_STATUS       0x32
#define PHY_LANE_D_STATUS       0x33
#define PHY_LANE_RX_DET_SHIFT   11
#define PHY_LANE_RX_DET_TH      0x1
#define PHY_LANE_IDLE_OFF       0x1
#define PHY_LANE_IDLE_MASK      0x1
#define PHY_LANE_IDLE_A_SHIFT   3
#define PHY_LANE_IDLE_B_SHIFT   4
#define PHY_LANE_IDLE_C_SHIFT   5
#define PHY_LANE_IDLE_D_SHIFT   6

struct rockchip_pcie_data {
        unsigned int pcie_conf;
        unsigned int pcie_status;
        unsigned int pcie_laneoff;
};

struct rockchip_pcie_phy {
        const struct rockchip_pcie_data *phy_data;
        struct regmap *reg_base;
        struct phy_pcie_instance {
                struct phy *phy;
                u32 index;
        } phys[PHY_MAX_LANE_NUM];
        struct mutex pcie_mutex;
        struct reset_control *phy_rst;
        struct clk *clk_pciephy_ref;
        int pwr_cnt;
        int init_cnt;
};

static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
{
        return container_of(inst, struct rockchip_pcie_phy,
                            phys[inst->index]);
}

static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
                                              struct of_phandle_args *args)
{
        struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);

        if (args->args_count == 0)
                return rk_phy->phys[0].phy;

        if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM))
                return ERR_PTR(-ENODEV);

        return rk_phy->phys[args->args[0]].phy;
}

static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
                              u32 addr, u32 data)
{
        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(data,
                                   PHY_CFG_DATA_MASK,
                                   PHY_CFG_DATA_SHIFT) |
                     HIWORD_UPDATE(addr,
                                   PHY_CFG_ADDR_MASK,
                                   PHY_CFG_ADDR_SHIFT));
        udelay(1);
        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
                                   PHY_CFG_WR_MASK,
                                   PHY_CFG_WR_SHIFT));
        udelay(1);
        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
                                   PHY_CFG_WR_MASK,
                                   PHY_CFG_WR_SHIFT));
}

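/*
 * Power handling is refcounted across the lanes sharing this PHY block:
 * power_off marks the caller's lane idle on every call but only asserts the
 * PHY reset once the last powered lane goes away, while power_on deasserts
 * the reset and waits for the PLL only on the first call.
 */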
static int rockchip_pcie_phy_power_off(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
        int err = 0;

        mutex_lock(&rk_phy->pcie_mutex);

        regmap_write(rk_phy->reg_base,
                     rk_phy->phy_data->pcie_laneoff,
                     HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
                                   PHY_LANE_IDLE_MASK,
                                   PHY_LANE_IDLE_A_SHIFT + inst->index));

        if (--rk_phy->pwr_cnt)
                goto err_out;

        err = reset_control_assert(rk_phy->phy_rst);
        if (err) {
                dev_err(&phy->dev, "assert phy_rst err %d\n", err);
                goto err_restore;
        }

err_out:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;

err_restore:
        rk_phy->pwr_cnt++;
        regmap_write(rk_phy->reg_base,
                     rk_phy->phy_data->pcie_laneoff,
                     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
                                   PHY_LANE_IDLE_MASK,
                                   PHY_LANE_IDLE_A_SHIFT + inst->index));
        mutex_unlock(&rk_phy->pcie_mutex);
        return err;
}

static int rockchip_pcie_phy_power_on(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
        int err = 0;
        u32 status;
        unsigned long timeout;

        mutex_lock(&rk_phy->pcie_mutex);

        if (rk_phy->pwr_cnt++)
                goto err_out;

        err = reset_control_deassert(rk_phy->phy_rst);
        if (err) {
                dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
                goto err_pwr_cnt;
        }

        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
                                   PHY_CFG_ADDR_MASK,
                                   PHY_CFG_ADDR_SHIFT));

        regmap_write(rk_phy->reg_base,
                     rk_phy->phy_data->pcie_laneoff,
                     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
                                   PHY_LANE_IDLE_MASK,
                                   PHY_LANE_IDLE_A_SHIFT + inst->index));

        /*
         * There is no documented timeout value for the PHY operations
         * below, so use a generously large one and poll with a
         * loop-break, which should be harmless.
         */
        timeout = jiffies + msecs_to_jiffies(1000);

        err = -EINVAL;
        while (time_before(jiffies, timeout)) {
                regmap_read(rk_phy->reg_base,
                            rk_phy->phy_data->pcie_status,
                            &status);
                if (status & PHY_PLL_LOCKED) {
                        dev_dbg(&phy->dev, "pll locked!\n");
                        err = 0;
                        break;
                }
                msleep(20);
        }

        if (err) {
                dev_err(&phy->dev, "pll lock timeout!\n");
                goto err_pll_lock;
        }

        phy_wr_cfg(rk_phy, PHY_CFG_CLK_TEST, PHY_CFG_SEPE_RATE);
        phy_wr_cfg(rk_phy, PHY_CFG_CLK_SCC, PHY_CFG_PLL_100M);

        err = -ETIMEDOUT;
        while (time_before(jiffies, timeout)) {
                regmap_read(rk_phy->reg_base,
                            rk_phy->phy_data->pcie_status,
                            &status);
                if (!(status & PHY_PLL_OUTPUT)) {
                        dev_dbg(&phy->dev, "pll output enable done!\n");
                        err = 0;
                        break;
                }
                msleep(20);
        }

        if (err) {
                dev_err(&phy->dev, "pll output enable timeout!\n");
                goto err_pll_lock;
        }

        regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
                     HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
                                   PHY_CFG_ADDR_MASK,
                                   PHY_CFG_ADDR_SHIFT));
        err = -EINVAL;
        while (time_before(jiffies, timeout)) {
                regmap_read(rk_phy->reg_base,
                            rk_phy->phy_data->pcie_status,
                            &status);
                if (status & PHY_PLL_LOCKED) {
                        dev_dbg(&phy->dev, "pll relocked!\n");
                        err = 0;
                        break;
                }
                msleep(20);
        }

        if (err) {
                dev_err(&phy->dev, "pll relock timeout!\n");
                goto err_pll_lock;
        }

err_out:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;

err_pll_lock:
        reset_control_assert(rk_phy->phy_rst);
err_pwr_cnt:
        rk_phy->pwr_cnt--;
        mutex_unlock(&rk_phy->pcie_mutex);
        return err;
}

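/*
 * init/exit are refcounted as well: the first init enables the reference
 * clock and puts the PHY back into reset (power_on releases it later), and
 * the last exit disables the reference clock again.
 */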
static int rockchip_pcie_phy_init(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
        int err = 0;

        mutex_lock(&rk_phy->pcie_mutex);

        if (rk_phy->init_cnt++)
                goto err_out;

        err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
        if (err) {
                dev_err(&phy->dev, "Failed to enable pcie ref clock.\n");
                goto err_refclk;
        }

        err = reset_control_assert(rk_phy->phy_rst);
        if (err) {
                dev_err(&phy->dev, "assert phy_rst err %d\n", err);
                goto err_reset;
        }

err_out:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;

err_reset:
        clk_disable_unprepare(rk_phy->clk_pciephy_ref);
err_refclk:
        rk_phy->init_cnt--;
        mutex_unlock(&rk_phy->pcie_mutex);
        return err;
}

static int rockchip_pcie_phy_exit(struct phy *phy)
{
        struct phy_pcie_instance *inst = phy_get_drvdata(phy);
        struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);

        mutex_lock(&rk_phy->pcie_mutex);

        if (--rk_phy->init_cnt)
                goto err_init_cnt;

        clk_disable_unprepare(rk_phy->clk_pciephy_ref);

err_init_cnt:
        mutex_unlock(&rk_phy->pcie_mutex);
        return 0;
}

static const struct phy_ops ops = {
        .init = rockchip_pcie_phy_init,
        .exit = rockchip_pcie_phy_exit,
        .power_on = rockchip_pcie_phy_power_on,
        .power_off = rockchip_pcie_phy_power_off,
        .owner = THIS_MODULE,
};

static const struct rockchip_pcie_data rk3399_pcie_data = {
        .pcie_conf = 0xe220,
        .pcie_status = 0xe2a4,
        .pcie_laneoff = 0xe214,
};

static const struct of_device_id rockchip_pcie_phy_dt_ids[] = {
        {
                .compatible = "rockchip,rk3399-pcie-phy",
                .data = &rk3399_pcie_data,
        },
        {}
};

MODULE_DEVICE_TABLE(of, rockchip_pcie_phy_dt_ids);

static int rockchip_pcie_phy_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rockchip_pcie_phy *rk_phy;
        struct phy_provider *phy_provider;
        struct regmap *grf;
        int i;
        u32 phy_num;

        grf = syscon_node_to_regmap(dev->parent->of_node);
        if (IS_ERR(grf)) {
                dev_err(dev, "Cannot find GRF syscon\n");
                return PTR_ERR(grf);
        }

        rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
        if (!rk_phy)
                return -ENOMEM;

        rk_phy->phy_data = device_get_match_data(&pdev->dev);
        if (!rk_phy->phy_data)
                return -EINVAL;

        rk_phy->reg_base = grf;

        mutex_init(&rk_phy->pcie_mutex);

        rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
        if (IS_ERR(rk_phy->phy_rst)) {
                if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
                        dev_err(dev,
                                "missing phy property for reset controller\n");
                return PTR_ERR(rk_phy->phy_rst);
        }

        rk_phy->clk_pciephy_ref = devm_clk_get(dev, "refclk");
        if (IS_ERR(rk_phy->clk_pciephy_ref)) {
                dev_err(dev, "refclk not found.\n");
                return PTR_ERR(rk_phy->clk_pciephy_ref);
        }

        /* parse #phy-cells to see if it's legacy PHY model */
        if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
                return -ENOENT;

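        /*
         * With the legacy binding (#phy-cells = <0>) a single PHY covers all
         * lanes and of_xlate always returns phys[0]; otherwise one PHY is
         * created per lane and selected by the cell value.
         */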
        phy_num = (phy_num == 0) ? 1 : PHY_MAX_LANE_NUM;
        dev_dbg(dev, "phy number is %d\n", phy_num);

        for (i = 0; i < phy_num; i++) {
                rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops);
                if (IS_ERR(rk_phy->phys[i].phy)) {
                        dev_err(dev, "failed to create PHY%d\n", i);
                        return PTR_ERR(rk_phy->phys[i].phy);
                }
                rk_phy->phys[i].index = i;
                phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]);
        }

        platform_set_drvdata(pdev, rk_phy);
        phy_provider = devm_of_phy_provider_register(dev,
                                        rockchip_pcie_phy_of_xlate);

        return PTR_ERR_OR_ZERO(phy_provider);
}

static struct platform_driver rockchip_pcie_driver = {
        .probe = rockchip_pcie_phy_probe,
        .driver = {
                .name = "rockchip-pcie-phy",
                .of_match_table = rockchip_pcie_phy_dt_ids,
        },
};

module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip PCIe PHY driver");
MODULE_LICENSE("GPL v2");