// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Linaro Ltd. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"

/* PD PHY register offsets and bit fields */
#define USB_PDPHY_MSG_CONFIG_REG	0x40
#define MSG_CONFIG_PORT_DATA_ROLE	BIT(3)
#define MSG_CONFIG_PORT_POWER_ROLE	BIT(2)
#define MSG_CONFIG_SPEC_REV_MASK	(BIT(1) | BIT(0))

#define USB_PDPHY_EN_CONTROL_REG	0x46
#define CONTROL_ENABLE			BIT(0)

#define USB_PDPHY_RX_STATUS_REG		0x4A
#define RX_FRAME_TYPE			(BIT(0) | BIT(1) | BIT(2))

#define USB_PDPHY_FRAME_FILTER_REG	0x4C
#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
#define FRAME_FILTER_EN_SOP		BIT(0)

#define USB_PDPHY_TX_SIZE_REG		0x42
#define TX_SIZE_MASK			0xF

#define USB_PDPHY_TX_CONTROL_REG	0x44
#define TX_CONTROL_RETRY_COUNT(n)	(((n) & 0x3) << 5)
#define TX_CONTROL_FRAME_TYPE(n)	(((n) & 0x7) << 2)
#define TX_CONTROL_FRAME_TYPE_CABLE_RESET	(0x1 << 2)
#define TX_CONTROL_SEND_SIGNAL		BIT(1)
#define TX_CONTROL_SEND_MSG		BIT(0)

#define USB_PDPHY_RX_SIZE_REG		0x48

#define USB_PDPHY_RX_ACKNOWLEDGE_REG	0x4B
#define RX_BUFFER_TOKEN			BIT(0)

#define USB_PDPHY_BIST_MODE_REG		0x4E
#define BIST_MODE_MASK			0xF
#define BIST_ENABLE			BIT(7)
#define PD_MSG_BIST			0x3
#define PD_BIST_TEST_DATA_MODE		0x8

#define USB_PDPHY_TX_BUFFER_HDR_REG	0x60
#define USB_PDPHY_TX_BUFFER_DATA_REG	0x62

#define USB_PDPHY_RX_BUFFER_REG		0x80

/* VDD regulator */
#define VDD_PDPHY_VOL_MIN		2800000	/* uV */
#define VDD_PDPHY_VOL_MAX		3300000	/* uV */
#define VDD_PDPHY_HPM_LOAD		3000	/* uA */

/* Message Spec Rev field */
#define PD_MSG_HDR_REV(hdr)		(((hdr) >> 6) & 3)

/* timers */
#define RECEIVER_RESPONSE_TIME		15	/* tReceiverResponse */
#define HARD_RESET_COMPLETE_TIME	5	/* tHardResetComplete */

/* Interrupt numbers */
#define PMIC_PDPHY_SIG_TX_IRQ		0x0
#define PMIC_PDPHY_SIG_RX_IRQ		0x1
#define PMIC_PDPHY_MSG_TX_IRQ		0x2
#define PMIC_PDPHY_MSG_RX_IRQ		0x3
#define PMIC_PDPHY_MSG_TX_FAIL_IRQ	0x4
#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ	0x5
#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ	0x6
#define PMIC_PDPHY_FR_SWAP_IRQ		0x7

struct pmic_typec_pdphy_irq_data {
	int virq;
	int irq;
	struct pmic_typec_pdphy *pmic_typec_pdphy;
};

struct pmic_typec_pdphy {
	struct device *dev;
	struct tcpm_port *tcpm_port;
	struct regmap *regmap;
	u32 base;

	unsigned int nr_irqs;
	struct pmic_typec_pdphy_irq_data *irq_data;

	struct work_struct reset_work;
	struct work_struct receive_work;
	struct regulator *vdd_pdphy;
	spinlock_t lock;	/* Register atomicity */
};

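/*
 * Reset handling: qcom_pmic_typec_pdphy_reset_on() terminates any transmit in
 * progress and blocks reception by clearing the frame filter, while
 * qcom_pmic_typec_pdphy_reset_off() re-enables reception of SOP and Hard
 * Reset frames. The pair is used to bounce the PHY around a hard reset.
 */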
static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	/* Terminate TX */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
	if (ret)
		goto err;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
	if (ret)
		goto err;

	return;
err:
	dev_err(dev, "pd_reset_on error\n");
}

static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
			   FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
	if (ret)
		dev_err(dev, "pd_reset_off error\n");
}

static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
								 reset_work);
	unsigned long flags;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
}

static int
qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	int ret;

	/* Clear TX control register */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
	if (ret)
		goto done;

	/* Perform readback to ensure sufficient delay for command to latch */
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);

done:
	if (ret)
		dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");

	return ret;
}

static int
qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
					 enum tcpm_transmit_type type,
					 unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	val = TX_CONTROL_SEND_SIGNAL;
	if (negotiated_rev == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
		val |= TX_CONTROL_FRAME_TYPE(1);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
		 type, negotiated_rev, ret);

	return ret;
}

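/*
 * Message transmission: verify that no received message is still pending,
 * clear the TX control register, write the 16-bit header and any data
 * objects into the TX buffer, program the total size minus one, then trigger
 * the send with a retry count derived from the negotiated PD revision
 * (2 retries for PD 3.0, 3 otherwise).
 */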
static int
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
					  enum tcpm_transmit_type type,
					  const struct pd_message *msg,
					  unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val, hdr_len, txbuf_len, txsize_len;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	hdr_len = sizeof(msg->header);
	txbuf_len = pd_header_cnt_le(msg->header) * 4;
	txsize_len = hdr_len + txbuf_len - 1;

	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
			  &val);
	if (ret)
		goto done;

	if (val) {
		dev_err(dev, "pd_transmit_payload: RX message pending\n");
		ret = -EBUSY;
		goto done;
	}

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
	ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
				pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
				&msg->header, hdr_len);
	if (ret)
		goto done;

	/* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
	if (txbuf_len) {
		ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
					pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
					&msg->payload, txbuf_len);
		if (ret)
			goto done;
	}

	/* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
			   txsize_len);
	if (ret)
		goto done;

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Initiate transmit with retry count as indicated by PD revision */
	val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
	if (pd_header_rev(msg->header) == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (ret) {
		dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
			hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
	}

	return ret;
}

static int qcom_pmic_typec_pdphy_pd_transmit(struct tcpc_dev *tcpc,
					     enum tcpm_transmit_type type,
					     const struct pd_message *msg,
					     unsigned int negotiated_rev)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	if (msg) {
		ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
								type, msg,
								negotiated_rev);
	} else {
		ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
							       type,
							       negotiated_rev);
	}

	if (ret)
		dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);

	return ret;
}

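/*
 * Message reception: read the RX size and status registers, copy the frame
 * out of the RX buffer, hand the buffer back to the hardware via the
 * acknowledge register, and finally pass the message up to the TCPM core.
 */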
static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	struct pd_message msg;
	unsigned int size, rx_status;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
	if (ret)
		goto done;

	/* Hardware requires +1 of the real read value to be passed */
	if (size < 1 || size > sizeof(msg.payload) + 1) {
		dev_dbg(dev, "pd_receive: invalid size %d\n", size);
		goto done;
	}

	size += 1;
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
			  &rx_status);

	if (ret)
		goto done;

	ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
			       pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
			       (u8 *)&msg, size);
	if (ret)
		goto done;

	/* Return ownership of RX buffer to hardware */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (!ret) {
		dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
		tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg, TCPC_TX_SOP);
	}
}

static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
{
	struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
	struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;

	switch (irq_data->virq) {
	case PMIC_PDPHY_SIG_TX_IRQ:
		dev_err(dev, "isr: tx_sig\n");
		break;
	case PMIC_PDPHY_SIG_RX_IRQ:
		schedule_work(&pmic_typec_pdphy->reset_work);
		break;
	case PMIC_PDPHY_MSG_TX_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_SUCCESS);
		break;
	case PMIC_PDPHY_MSG_RX_IRQ:
		qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
		break;
	case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_FAILED);
		break;
	case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
					  TCPC_TX_DISCARDED);
		break;
	}

	return IRQ_HANDLED;
}

static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", str_on_off(on));

	return ret;
}

static int qcom_pmic_typec_pdphy_set_roles(struct tcpc_dev *tcpc, bool attached,
					   enum typec_role power_role,
					   enum typec_data_role data_role)
{
	struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_PORT_DATA_ROLE |
				 MSG_CONFIG_PORT_POWER_ROLE,
				 (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
				 (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0));

	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
		data_role, power_role);

	return ret;
}

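/*
 * Enable/disable: enabling programs the spec revision to PD 2.0, toggles the
 * enable control bit and restores the frame filter; disabling blocks RX/TX
 * and clears the enable control register. pmic_typec_pdphy_reset() cycles
 * the PHY through disable and enable with a short delay in between.
 */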
static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
			   CONTROL_ENABLE);
	if (ret)
		goto done;

	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
	if (ret)
		dev_err(dev, "pdphy_enable fail %d\n", ret);

	return ret;
}

static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	int ret;

	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);

	return ret;
}

static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	int ret;

	ret = qcom_pmic_typec_pdphy_disable(pmic_typec_pdphy);
	if (ret)
		goto done;

	usleep_range(400, 500);
	ret = qcom_pmic_typec_pdphy_enable(pmic_typec_pdphy);
done:
	return ret;
}

static int qcom_pmic_typec_pdphy_start(struct pmic_typec *tcpm,
				       struct tcpm_port *tcpm_port)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	int i;
	int ret;

	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
	if (ret)
		return ret;

	pmic_typec_pdphy->tcpm_port = tcpm_port;

	ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
	if (ret)
		goto err_disable_vdd_pdhy;

	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
		enable_irq(pmic_typec_pdphy->irq_data[i].irq);

	return 0;

err_disable_vdd_pdhy:
	regulator_disable(pmic_typec_pdphy->vdd_pdphy);

	return ret;
}

static void qcom_pmic_typec_pdphy_stop(struct pmic_typec *tcpm)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
	int i;

	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
		disable_irq(pmic_typec_pdphy->irq_data[i].irq);

	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);

	regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}

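/*
 * Probe: allocate driver state, look up the "vdd-pdphy" supply, request the
 * named IRQs (left disabled until the port is started) and register the
 * tcpc_dev callbacks plus the start/stop hooks on the shared struct
 * pmic_typec.
 */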
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
				struct pmic_typec *tcpm,
				const struct pmic_typec_pdphy_resources *res,
				struct regmap *regmap,
				u32 base)
{
	struct pmic_typec_pdphy *pmic_typec_pdphy;
	struct device *dev = &pdev->dev;
	struct pmic_typec_pdphy_irq_data *irq_data;
	int i, ret, irq;

	pmic_typec_pdphy = devm_kzalloc(dev, sizeof(*pmic_typec_pdphy), GFP_KERNEL);
	if (!pmic_typec_pdphy)
		return -ENOMEM;

	if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
		return -EINVAL;

	irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
				GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
	if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
		return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);

	pmic_typec_pdphy->dev = dev;
	pmic_typec_pdphy->base = base;
	pmic_typec_pdphy->regmap = regmap;
	pmic_typec_pdphy->nr_irqs = res->nr_irqs;
	pmic_typec_pdphy->irq_data = irq_data;
	spin_lock_init(&pmic_typec_pdphy->lock);
	INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);

	for (i = 0; i < res->nr_irqs; i++, irq_data++) {
		irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
		if (irq < 0)
			return irq;

		irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
		irq_data->irq = irq;
		irq_data->virq = res->irq_params[i].virq;

		ret = devm_request_threaded_irq(dev, irq, NULL,
						qcom_pmic_typec_pdphy_isr,
						IRQF_ONESHOT | IRQF_NO_AUTOEN,
						res->irq_params[i].irq_name,
						irq_data);
		if (ret)
			return ret;
	}

	tcpm->pmic_typec_pdphy = pmic_typec_pdphy;

	tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_set_pd_rx;
	tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_set_roles;
	tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_pd_transmit;

	tcpm->pdphy_start = qcom_pmic_typec_pdphy_start;
	tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stop;

	return 0;
}

const struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
	.irq_params = {
		{
			.virq = PMIC_PDPHY_SIG_TX_IRQ,
			.irq_name = "sig-tx",
		},
		{
			.virq = PMIC_PDPHY_SIG_RX_IRQ,
			.irq_name = "sig-rx",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_IRQ,
			.irq_name = "msg-tx",
		},
		{
			.virq = PMIC_PDPHY_MSG_RX_IRQ,
			.irq_name = "msg-rx",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
			.irq_name = "msg-tx-failed",
		},
		{
			.virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
			.irq_name = "msg-tx-discarded",
		},
		{
			.virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
			.irq_name = "msg-rx-discarded",
		},
	},
	.nr_irqs = 7,
};