/*
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>

/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION		0x0000
#define PMIC_ARB_VERSION_V2_MIN		0x20010000
#define PMIC_ARB_INT_EN			0x0004

/* PMIC Arbiter channel registers offsets */
#define PMIC_ARB_CMD			0x00
#define PMIC_ARB_CONFIG			0x04
#define PMIC_ARB_STATUS			0x08
#define PMIC_ARB_WDATA0			0x10
#define PMIC_ARB_WDATA1			0x14
#define PMIC_ARB_RDATA0			0x18
#define PMIC_ARB_RDATA1			0x1C
#define PMIC_ARB_REG_CHNL(N)		(0x800 + 0x4 * (N))

/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N)	(0x0B00 + (4 * (N)))
#define SPMI_MAPPING_BIT_INDEX(X)	(((X) >> 18) & 0xF)
#define SPMI_MAPPING_BIT_IS_0_FLAG(X)	(((X) >> 17) & 0x1)
#define SPMI_MAPPING_BIT_IS_0_RESULT(X)	(((X) >> 9) & 0xFF)
#define SPMI_MAPPING_BIT_IS_1_FLAG(X)	(((X) >> 8) & 0x1)
#define SPMI_MAPPING_BIT_IS_1_RESULT(X)	(((X) >> 0) & 0xFF)

#define SPMI_MAPPING_TABLE_LEN		255
#define SPMI_MAPPING_TABLE_TREE_DEPTH	16	/* Maximum of 16-bits */
#define PPID_TO_CHAN_TABLE_SZ		BIT(12)	/* PPID is 12 bits, chan is 1 byte */

/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N)	(0x0700 + (4 * (N)))
#define SPMI_OWNERSHIP_PERIPH2OWNER(X)	((X) & 0x7)

/* Channel Status fields */
enum pmic_arb_chnl_status {
	PMIC_ARB_STATUS_DONE	= (1 << 0),
	PMIC_ARB_STATUS_FAILURE	= (1 << 1),
	PMIC_ARB_STATUS_DENIED	= (1 << 2),
	PMIC_ARB_STATUS_DROPPED	= (1 << 3),
};

/* Command register fields */
#define PMIC_ARB_CMD_MAX_BYTE_COUNT	8

/* Command Opcodes */
enum pmic_arb_cmd_op_code {
	PMIC_ARB_OP_EXT_WRITEL = 0,
	PMIC_ARB_OP_EXT_READL = 1,
	PMIC_ARB_OP_EXT_WRITE = 2,
	PMIC_ARB_OP_RESET = 3,
	PMIC_ARB_OP_SLEEP = 4,
	PMIC_ARB_OP_SHUTDOWN = 5,
	PMIC_ARB_OP_WAKEUP = 6,
	PMIC_ARB_OP_AUTHENTICATE = 7,
	PMIC_ARB_OP_MSTR_READ = 8,
	PMIC_ARB_OP_MSTR_WRITE = 9,
	PMIC_ARB_OP_EXT_READ = 13,
	PMIC_ARB_OP_WRITE = 14,
	PMIC_ARB_OP_READ = 15,
	PMIC_ARB_OP_ZERO_WRITE = 16,
};

/* Maximum number of supported PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS		256
#define PMIC_ARB_MAX_CHNL		128
#define PMIC_ARB_PERIPH_ID_VALID	(1 << 15)
#define PMIC_ARB_TIMEOUT_US		100
#define PMIC_ARB_MAX_TRANS_BYTES	(8)

#define PMIC_ARB_APID_MASK		0xFF
#define PMIC_ARB_PPID_MASK		0xFFF

/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)

struct pmic_arb_ver_ops;

/**
 * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
 *
 * @rd_base:		on v1 "core", on v2 "observer" register base off DT.
 * @wr_base:		on v1 "core", on v2 "chnls" register base off DT.
 * @intr:		address of the SPMI interrupt control registers.
 * @cnfg:		address of the PMIC Arbiter configuration registers.
 * @lock:		lock to synchronize accesses.
 * @channel:		execution environment channel to use for accesses.
 * @irq:		PMIC ARB interrupt.
 * @ee:			the current Execution Environment.
 * @min_apid:		minimum APID (used for bounding IRQ search).
 * @max_apid:		maximum APID.
 * @mapping_table:	in-memory copy of PPID -> APID mapping table.
 * @domain:		irq domain object for PMIC IRQ domain.
 * @spmic:		SPMI controller object.
 * @apid_to_ppid:	in-memory copy of APID -> PPID mapping table.
 * @ver_ops:		version dependent operations.
 * @ppid_to_chan:	in-memory copy of PPID -> channel (APID) mapping table.
 *			v2 only.
 */
struct spmi_pmic_arb_dev {
	void __iomem		*rd_base;
	void __iomem		*wr_base;
	void __iomem		*intr;
	void __iomem		*cnfg;
	raw_spinlock_t		lock;
	u8			channel;
	int			irq;
	u8			ee;
	u8			min_apid;
	u8			max_apid;
	u32			mapping_table[SPMI_MAPPING_TABLE_LEN];
	struct irq_domain	*domain;
	struct spmi_controller	*spmic;
	u16			apid_to_ppid[256];
	const struct pmic_arb_ver_ops *ver_ops;
	u8			*ppid_to_chan;
};

/**
 * pmic_arb_ver: version dependent functionality.
 *
 * @non_data_cmd:	on v1 issues an spmi non-data command.
 *			on v2 no HW support, returns -EOPNOTSUPP.
 * @offset:		on v1 offset of per-ee channel.
 *			on v2 offset of per-ee and per-ppid channel.
 * @fmt_cmd:		formats a GENI/SPMI command.
 * @owner_acc_status:	on v1 offset of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
 *			on v2 offset of SPMI_PIC_OWNERm_ACC_STATUSn.
 * @acc_enable:		on v1 offset of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
 *			on v2 offset of SPMI_PIC_ACC_ENABLEn.
 * @irq_status:		on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
 *			on v2 offset of SPMI_PIC_IRQ_STATUSn.
 * @irq_clear:		on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
 *			on v2 offset of SPMI_PIC_IRQ_CLEARn.
 */
struct pmic_arb_ver_ops {
	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
	u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
	/* Interrupts controller functionality (offset of PIC registers) */
	u32 (*owner_acc_status)(u8 m, u8 n);
	u32 (*acc_enable)(u8 n);
	u32 (*irq_status)(u8 n);
	u32 (*irq_clear)(u8 n);
};

static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
{
	return readl_relaxed(dev->rd_base + offset);
}

static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
				       u32 offset, u32 val)
{
	writel_relaxed(val, dev->wr_base + offset);
}

static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
				       u32 offset, u32 val)
{
	writel_relaxed(val, dev->rd_base + offset);
}

/**
 * pa_read_data: reads a pmic-arb register and copies 1..4 bytes to buf
 * @bc:		byte count - 1. range: 0..3
 * @reg:	register's address
 * @buf:	output parameter, length must be bc + 1
 */
static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
{
	u32 data = pmic_arb_base_read(dev, reg);
	memcpy(buf, &data, (bc & 3) + 1);
}

/**
 * pa_write_data: writes 1..4 bytes from buf to a pmic-arb register
 * @bc:		byte count - 1. range: 0..3.
 * @reg:	register's address.
 * @buf:	buffer to write. length must be bc + 1.
 */
static void
pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
{
	u32 data = 0;
	memcpy(&data, buf, (bc & 3) + 1);
	pmic_arb_base_write(dev, reg, data);
}

static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
				  void __iomem *base, u8 sid, u16 addr)
{
	struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
	u32 status = 0;
	u32 timeout = PMIC_ARB_TIMEOUT_US;
	u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;

	while (timeout--) {
		status = readl_relaxed(base + offset);

		if (status & PMIC_ARB_STATUS_DONE) {
			if (status & PMIC_ARB_STATUS_DENIED) {
				dev_err(&ctrl->dev,
					"%s: transaction denied (0x%x)\n",
					__func__, status);
				return -EPERM;
			}

			if (status & PMIC_ARB_STATUS_FAILURE) {
				dev_err(&ctrl->dev,
					"%s: transaction failed (0x%x)\n",
					__func__, status);
				return -EIO;
			}

			if (status & PMIC_ARB_STATUS_DROPPED) {
				dev_err(&ctrl->dev,
					"%s: transaction dropped (0x%x)\n",
					__func__, status);
				return -EIO;
			}

			return 0;
		}
		udelay(1);
	}

	dev_err(&ctrl->dev,
		"%s: timeout, status 0x%x\n",
		__func__, status);
	return -ETIMEDOUT;
}

static int
pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u32 cmd;
	int rc;
	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);

	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);

	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);

	return rc;
}

static int
pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	return -EOPNOTSUPP;
}

/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);

	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);

	/* Check for valid non-data command */
	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
		return -EINVAL;

	return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
}

static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			     u16 addr, u8 *buf, size_t len)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u8 bc = len - 1;
	u32 cmd;
	int rc;
	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);

	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(&ctrl->dev,
			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
			PMIC_ARB_MAX_TRANS_BYTES, len);
		return -EINVAL;
	}

	/* Check the opcode */
	if (opc >= 0x60 && opc <= 0x7F)
		opc = PMIC_ARB_OP_READ;
	else if (opc >= 0x20 && opc <= 0x2F)
		opc = PMIC_ARB_OP_EXT_READ;
	else if (opc >= 0x38 && opc <= 0x3F)
		opc = PMIC_ARB_OP_EXT_READL;
	else
		return -EINVAL;

	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);

	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
	if (rc)
		goto done;

	pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
		     min_t(u8, bc, 3));

	if (bc > 3)
		pa_read_data(pmic_arb, buf + 4,
			     offset + PMIC_ARB_RDATA1, bc - 4);

done:
	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
	return rc;
}

static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			      u16 addr, const u8 *buf, size_t len)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
	unsigned long flags;
	u8 bc = len - 1;
	u32 cmd;
	int rc;
	u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);

	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(&ctrl->dev,
			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
			PMIC_ARB_MAX_TRANS_BYTES, len);
		return -EINVAL;
	}

	/* Check the opcode */
	if (opc >= 0x40 && opc <= 0x5F)
		opc = PMIC_ARB_OP_WRITE;
	else if (opc >= 0x00 && opc <= 0x0F)
		opc = PMIC_ARB_OP_EXT_WRITE;
	else if (opc >= 0x30 && opc <= 0x37)
		opc = PMIC_ARB_OP_EXT_WRITEL;
	else if (opc >= 0x80 && opc <= 0xFF)
		opc = PMIC_ARB_OP_ZERO_WRITE;
	else
		return -EINVAL;

	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);

	/* Write data to FIFOs */
	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
	pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
		      min_t(u8, bc, 3));
	if (bc > 3)
		pa_write_data(pmic_arb, buf + 4,
			      offset + PMIC_ARB_WDATA1, bc - 4);

	/* Start the transaction */
	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);

	return rc;
}

enum qpnpint_regs {
	QPNPINT_REG_RT_STS		= 0x10,
	QPNPINT_REG_SET_TYPE		= 0x11,
	QPNPINT_REG_POLARITY_HIGH	= 0x12,
	QPNPINT_REG_POLARITY_LOW	= 0x13,
	QPNPINT_REG_LATCHED_CLR		= 0x14,
	QPNPINT_REG_EN_SET		= 0x15,
	QPNPINT_REG_EN_CLR		= 0x16,
	QPNPINT_REG_LATCHED_STS		= 0x18,
};

struct spmi_pmic_arb_qpnpint_type {
	u8 type; /* 1 -> edge */
	u8 polarity_high;
	u8 polarity_low;
} __packed;

/* Simplified accessor functions for irqchip callbacks */
static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
			       size_t len)
{
	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
	u8 sid = d->hwirq >> 24;
	u8 per = d->hwirq >> 16;

	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
			       (per << 8) + reg, buf, len))
		dev_err_ratelimited(&pa->spmic->dev,
				    "failed irqchip transaction on %x\n",
				    d->irq);
}

static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
	u8 sid = d->hwirq >> 24;
	u8 per = d->hwirq >> 16;

	if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
			      (per << 8) + reg, buf, len))
		dev_err_ratelimited(&pa->spmic->dev,
				    "failed irqchip transaction on %x\n",
				    d->irq);
}

static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
{
	unsigned int irq;
	u32 status;
	int id;

	status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
	while (status) {
		id = ffs(status) - 1;
		status &= ~(1 << id);
		irq = irq_find_mapping(pa->domain,
				       pa->apid_to_ppid[apid] << 16
				     | id << 8
				     | apid);
		generic_handle_irq(irq);
	}
}

static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
{
	struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	void __iomem *intr = pa->intr;
	int first = pa->min_apid >> 5;
	int last = pa->max_apid >> 5;
	u32 status;
	int i, id;

	chained_irq_enter(chip, desc);

	for (i = first; i <= last; ++i) {
		status = readl_relaxed(intr +
				pa->ver_ops->owner_acc_status(pa->ee, i));
		while (status) {
			id = ffs(status) - 1;
			status &= ~(1 << id);
			periph_interrupt(pa, id + i * 32);
		}
	}

	chained_irq_exit(chip, desc);
}

static void qpnpint_irq_ack(struct irq_data *d)
{
	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
	u8 irq = d->hwirq >> 8;
	u8 apid = d->hwirq;
	unsigned long flags;
	u8 data;

	raw_spin_lock_irqsave(&pa->lock, flags);
	writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
	raw_spin_unlock_irqrestore(&pa->lock, flags);

	data = 1 << irq;
	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}

static void qpnpint_irq_mask(struct irq_data *d)
{
	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
	u8 irq = d->hwirq >> 8;
	u8 apid = d->hwirq;
	unsigned long flags;
	u32 status;
	u8 data;

	raw_spin_lock_irqsave(&pa->lock, flags);
	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
		writel_relaxed(status, pa->intr +
			       pa->ver_ops->acc_enable(apid));
	}
	raw_spin_unlock_irqrestore(&pa->lock, flags);

	data = 1 << irq;
	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
}

static void qpnpint_irq_unmask(struct irq_data *d)
{
	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
	u8 irq = d->hwirq >> 8;
	u8 apid = d->hwirq;
	unsigned long flags;
	u32 status;
	u8 data;

	raw_spin_lock_irqsave(&pa->lock, flags);
	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
		writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
			       pa->intr + pa->ver_ops->acc_enable(apid));
	}
	raw_spin_unlock_irqrestore(&pa->lock, flags);

	data = 1 << irq;
	qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
}

static void qpnpint_irq_enable(struct irq_data *d)
{
	u8 irq = d->hwirq >> 8;
	u8 data;

	qpnpint_irq_unmask(d);

	data = 1 << irq;
	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}

static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct spmi_pmic_arb_qpnpint_type type;
	u8 irq = d->hwirq >> 8;

	qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));

	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
		type.type |= 1 << irq;
		if (flow_type & IRQF_TRIGGER_RISING)
			type.polarity_high |= 1 << irq;
		if (flow_type & IRQF_TRIGGER_FALLING)
			type.polarity_low |= 1 << irq;
	} else {
		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
		    (flow_type & (IRQF_TRIGGER_LOW)))
			return -EINVAL;

		type.type &= ~(1 << irq); /* level trig */
		if (flow_type & IRQF_TRIGGER_HIGH)
			type.polarity_high |= 1 << irq;
		else
			type.polarity_low |= 1 << irq;
	}

	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
	return 0;
}

static struct irq_chip pmic_arb_irqchip = {
	.name		= "pmic_arb",
	.irq_enable	= qpnpint_irq_enable,
	.irq_ack	= qpnpint_irq_ack,
	.irq_mask	= qpnpint_irq_mask,
	.irq_unmask	= qpnpint_irq_unmask,
	.irq_set_type	= qpnpint_irq_set_type,
	.flags		= IRQCHIP_MASK_ON_SUSPEND
			| IRQCHIP_SKIP_SET_WAKE,
};

struct spmi_pmic_arb_irq_spec {
	unsigned slave:4;
	unsigned per:8;
	unsigned irq:3;
};

static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
				struct spmi_pmic_arb_irq_spec *spec,
				u8 *apid)
{
	u16 ppid = spec->slave << 8 | spec->per;
	u32 *mapping_table = pa->mapping_table;
	int index = 0, i;
	u32 data;

	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
		data = mapping_table[index];

		if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
			} else {
				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
				return 0;
			}
		} else {
			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
			} else {
				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
				return 0;
			}
		}
	}

	return -ENODEV;
}

static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
					   struct device_node *controller,
					   const u32 *intspec,
					   unsigned int intsize,
					   unsigned long *out_hwirq,
					   unsigned int *out_type)
{
	struct spmi_pmic_arb_dev *pa = d->host_data;
	struct spmi_pmic_arb_irq_spec spec;
	int err;
	u8 apid;

	dev_dbg(&pa->spmic->dev,
		"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
		intspec[0], intspec[1], intspec[2]);

	if (d->of_node != controller)
		return -EINVAL;
	if (intsize != 4)
		return -EINVAL;
	if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
		return -EINVAL;

	spec.slave = intspec[0];
	spec.per = intspec[1];
	spec.irq = intspec[2];

	err = search_mapping_table(pa, &spec, &apid);
	if (err)
		return err;

	pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;

	/* Keep track of {max,min}_apid for bounding search during interrupt */
	if (apid > pa->max_apid)
		pa->max_apid = apid;
	if (apid < pa->min_apid)
		pa->min_apid = apid;

	*out_hwirq = spec.slave << 24
		   | spec.per << 16
		   | spec.irq << 8
		   | apid;
	*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;

	dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);

	return 0;
}

static int qpnpint_irq_domain_map(struct irq_domain *d,
				  unsigned int virq,
				  irq_hw_number_t hwirq)
{
	struct spmi_pmic_arb_dev *pa = d->host_data;

	dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);

	irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	irq_set_noprobe(virq);
	return 0;
}

/* v1 offset per ee */
static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
{
	return 0x800 + 0x80 * pa->channel;
}

/* v2 offset per ppid (chan) and per ee */
static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
{
	u16 ppid = (sid << 8) | (addr >> 8);
	u8 chan = pa->ppid_to_chan[ppid];

	return 0x1000 * pa->ee + 0x8000 * chan;
}

static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
{
	return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
}
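
/*
 * On v2 the SID and the high address byte are not encoded in the command
 * word: they select the per-PPID channel via pmic_arb_offset_v2() above,
 * so only the low address byte and the byte count are formatted here.
 */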
static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
{
	return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}

static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
{
	return 0x20 * m + 0x4 * n;
}

static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
{
	return 0x100000 + 0x1000 * m + 0x4 * n;
}

static u32 pmic_arb_acc_enable_v1(u8 n)
{
	return 0x200 + 0x4 * n;
}

static u32 pmic_arb_acc_enable_v2(u8 n)
{
	return 0x1000 * n;
}

static u32 pmic_arb_irq_status_v1(u8 n)
{
	return 0x600 + 0x4 * n;
}

static u32 pmic_arb_irq_status_v2(u8 n)
{
	return 0x4 + 0x1000 * n;
}

static u32 pmic_arb_irq_clear_v1(u8 n)
{
	return 0xA00 + 0x4 * n;
}

static u32 pmic_arb_irq_clear_v2(u8 n)
{
	return 0x8 + 0x1000 * n;
}

static const struct pmic_arb_ver_ops pmic_arb_v1 = {
	.non_data_cmd		= pmic_arb_non_data_cmd_v1,
	.offset			= pmic_arb_offset_v1,
	.fmt_cmd		= pmic_arb_fmt_cmd_v1,
	.owner_acc_status	= pmic_arb_owner_acc_status_v1,
	.acc_enable		= pmic_arb_acc_enable_v1,
	.irq_status		= pmic_arb_irq_status_v1,
	.irq_clear		= pmic_arb_irq_clear_v1,
};

static const struct pmic_arb_ver_ops pmic_arb_v2 = {
	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
	.offset			= pmic_arb_offset_v2,
	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
	.owner_acc_status	= pmic_arb_owner_acc_status_v2,
	.acc_enable		= pmic_arb_acc_enable_v2,
	.irq_status		= pmic_arb_irq_status_v2,
	.irq_clear		= pmic_arb_irq_clear_v2,
};

static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
	.map	= qpnpint_irq_domain_map,
	.xlate	= qpnpint_irq_domain_dt_translate,
};

static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
	struct spmi_pmic_arb_dev *pa;
	struct spmi_controller *ctrl;
	struct resource *res;
	void __iomem *core;
	u32 channel, ee, hw_ver;
	int err, i;
	bool is_v1;

	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
	if (!ctrl)
		return -ENOMEM;

	pa = spmi_controller_get_drvdata(ctrl);
	pa->spmic = ctrl;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
	core = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(core)) {
		err = PTR_ERR(core);
		goto err_put_ctrl;
	}

	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
	is_v1 = (hw_ver < PMIC_ARB_VERSION_V2_MIN);

	dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
		 hw_ver);

	if (is_v1) {
		pa->ver_ops = &pmic_arb_v1;
		pa->wr_base = core;
		pa->rd_base = core;
	} else {
		u8 chan;
		u16 ppid;
		u32 regval;

		pa->ver_ops = &pmic_arb_v2;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "obsrvr");
		pa->rd_base = devm_ioremap_resource(&ctrl->dev, res);
		if (IS_ERR(pa->rd_base)) {
			err = PTR_ERR(pa->rd_base);
			goto err_put_ctrl;
		}

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "chnls");
		pa->wr_base = devm_ioremap_resource(&ctrl->dev, res);
		if (IS_ERR(pa->wr_base)) {
			err = PTR_ERR(pa->wr_base);
			goto err_put_ctrl;
		}

		pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
					PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
		if (!pa->ppid_to_chan) {
			err = -ENOMEM;
			goto err_put_ctrl;
		}
		/*
		 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
		 * ppid_to_chan is an in-memory invert of that table.
		 */
		for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
			regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
			if (!regval)
				continue;

			ppid = (regval >> 8) & 0xFFF;
			pa->ppid_to_chan[ppid] = chan;
		}
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
	pa->intr = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(pa->intr)) {
		err = PTR_ERR(pa->intr);
		goto err_put_ctrl;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
	pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
	if (IS_ERR(pa->cnfg)) {
		err = PTR_ERR(pa->cnfg);
		goto err_put_ctrl;
	}

	pa->irq = platform_get_irq_byname(pdev, "periph_irq");
	if (pa->irq < 0) {
		err = pa->irq;
		goto err_put_ctrl;
	}

	err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
	if (err) {
		dev_err(&pdev->dev, "channel unspecified.\n");
		goto err_put_ctrl;
	}

	if (channel > 5) {
		dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
			channel);
		err = -EINVAL;
		goto err_put_ctrl;
	}

	pa->channel = channel;

	err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
	if (err) {
		dev_err(&pdev->dev, "EE unspecified.\n");
		goto err_put_ctrl;
	}

	if (ee > 5) {
		dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
		err = -EINVAL;
		goto err_put_ctrl;
	}

	pa->ee = ee;

	for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
		pa->mapping_table[i] = readl_relaxed(
				pa->cnfg + SPMI_MAPPING_TABLE_REG(i));

	/*
	 * Initialize max_apid/min_apid to the opposite bounds; the irq
	 * domain translation is guaranteed to narrow them to the real range.
	 */
	pa->max_apid = 0;
	pa->min_apid = PMIC_ARB_MAX_PERIPHS - 1;

	platform_set_drvdata(pdev, ctrl);
	raw_spin_lock_init(&pa->lock);

	ctrl->cmd = pmic_arb_cmd;
	ctrl->read_cmd = pmic_arb_read_cmd;
	ctrl->write_cmd = pmic_arb_write_cmd;

	dev_dbg(&pdev->dev, "adding irq domain\n");
	pa->domain = irq_domain_add_tree(pdev->dev.of_node,
					 &pmic_arb_irq_domain_ops, pa);
	if (!pa->domain) {
		dev_err(&pdev->dev, "unable to create irq_domain\n");
		err = -ENOMEM;
		goto err_put_ctrl;
	}

	irq_set_handler_data(pa->irq, pa);
	irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);

	err = spmi_controller_add(ctrl);
	if (err)
		goto err_domain_remove;

	return 0;

err_domain_remove:
	irq_set_chained_handler(pa->irq, NULL);
	irq_set_handler_data(pa->irq, NULL);
	irq_domain_remove(pa->domain);
err_put_ctrl:
	spmi_controller_put(ctrl);
	return err;
}

static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
	struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);

	spmi_controller_remove(ctrl);
	irq_set_chained_handler(pa->irq, NULL);
	irq_set_handler_data(pa->irq, NULL);
	irq_domain_remove(pa->domain);
	spmi_controller_put(ctrl);
	return 0;
}

static const struct of_device_id spmi_pmic_arb_match_table[] = {
	{ .compatible = "qcom,spmi-pmic-arb", },
	{},
};
MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table);

static struct platform_driver spmi_pmic_arb_driver = {
	.probe		= spmi_pmic_arb_probe,
	.remove		= spmi_pmic_arb_remove,
	.driver		= {
		.name		= "spmi_pmic_arb",
		.of_match_table = spmi_pmic_arb_match_table,
	},
};
module_platform_driver(spmi_pmic_arb_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spmi_pmic_arb");
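
/*
 * Illustrative (not authoritative) devicetree node for this driver, put
 * together from the resource and property names used in probe() above;
 * the addresses and the interrupt specifier are placeholders. See the
 * qcom,spmi-pmic-arb devicetree binding documentation for the real example.
 *
 *	spmi@fc4cf000 {
 *		compatible = "qcom,spmi-pmic-arb";
 *		reg-names = "core", "intr", "cnfg";
 *		reg = <0xfc4cf000 0x1000>,
 *		      <0xfc4cb000 0x1000>,
 *		      <0xfc4ca000 0x1000>;
 *		interrupt-names = "periph_irq";
 *		interrupts = <0 190 0>;
 *		qcom,ee = <0>;
 *		qcom,channel = <0>;
 *		interrupt-controller;
 *		#interrupt-cells = <4>;	// sid, periph, irq, IRQ_TYPE_* flags
 *	};
 *
 * A v2 arbiter (PMIC_ARB_VERSION >= 0x20010000) additionally needs the
 * "obsrvr" and "chnls" reg regions, used for reads and writes respectively.
 */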