// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <clocksource/arm_arch_timer.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"


#define RSC_DRV_ID			0

#define MAJOR_VER_MASK			0xFF
#define MAJOR_VER_SHIFT			16
#define MINOR_VER_MASK			0xFF
#define MINOR_VER_SHIFT			8

enum {
	RSC_DRV_TCS_OFFSET,
	RSC_DRV_CMD_OFFSET,
	DRV_SOLVER_CONFIG,
	DRV_PRNT_CHLD_CONFIG,
	RSC_DRV_IRQ_ENABLE,
	RSC_DRV_IRQ_STATUS,
	RSC_DRV_IRQ_CLEAR,
	RSC_DRV_CMD_WAIT_FOR_CMPL,
	RSC_DRV_CONTROL,
	RSC_DRV_STATUS,
	RSC_DRV_CMD_ENABLE,
	RSC_DRV_CMD_MSGID,
	RSC_DRV_CMD_ADDR,
	RSC_DRV_CMD_DATA,
	RSC_DRV_CMD_STATUS,
	RSC_DRV_CMD_RESP_DATA,
};

/* DRV HW Solver Configuration Information Register */
#define DRV_HW_SOLVER_MASK		1
#define DRV_HW_SOLVER_SHIFT		24

/* DRV TCS Configuration Information Register */
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Offsets for CONTROL TCS Registers */
#define RSC_DRV_CTL_TCS_DATA_HI		0x38
#define RSC_DRV_CTL_TCS_DATA_HI_MASK	0xFFFFFF
#define RSC_DRV_CTL_TCS_DATA_HI_VALID	BIT(31)
#define RSC_DRV_CTL_TCS_DATA_LO		0x40
#define RSC_DRV_CTL_TCS_DATA_LO_MASK	0xFFFFFFFF
#define RSC_DRV_CTL_TCS_DATA_SIZE	32

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 *  +---------------------------------------------------+
 *  |RSC                                                |
 *  | ctrl                                              |
 *  |                                                   |
 *  | Drvs:                                             |
 *  | +-----------------------------------------------+ |
 *  | |DRV0                                           | |
 *  | | ctrl/config                                   | |
 *  | | IRQ                                           | |
 *  | |                                               | |
 *  | | TCSes:                                        | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | | +------------------------------------------+  | |
 *  | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15|  | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  |  | |
 *  | | +------------------------------------------+  | |
 *  | |                    ......                     | |
 *  | +-----------------------------------------------+ |
 *  | +-----------------------------------------------+ |
 *  | |DRV1                                           | |
 *  | | (same as DRV0)                                | |
 *  | +-----------------------------------------------+ |
 *  |                      ......                       |
 *  +---------------------------------------------------+
 */

#define USECS_TO_CYCLES(time_usecs)			\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

static inline unsigned long xloops_to_cycles(u64 xloops)
{
	return (xloops * loops_per_jiffy * HZ) >> 32;
}

static u32 rpmh_rsc_reg_offset_ver_2_7[] = {
	[RSC_DRV_TCS_OFFSET]		= 672,
	[RSC_DRV_CMD_OFFSET]		= 20,
	[DRV_SOLVER_CONFIG]		= 0x04,
	[DRV_PRNT_CHLD_CONFIG]		= 0x0C,
	[RSC_DRV_IRQ_ENABLE]		= 0x00,
	[RSC_DRV_IRQ_STATUS]		= 0x04,
	[RSC_DRV_IRQ_CLEAR]		= 0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL]	= 0x10,
	[RSC_DRV_CONTROL]		= 0x14,
	[RSC_DRV_STATUS]		= 0x18,
	[RSC_DRV_CMD_ENABLE]		= 0x1C,
	[RSC_DRV_CMD_MSGID]		= 0x30,
	[RSC_DRV_CMD_ADDR]		= 0x34,
	[RSC_DRV_CMD_DATA]		= 0x38,
	[RSC_DRV_CMD_STATUS]		= 0x3C,
	[RSC_DRV_CMD_RESP_DATA]		= 0x40,
};

static u32 rpmh_rsc_reg_offset_ver_3_0[] = {
	[RSC_DRV_TCS_OFFSET]		= 672,
	[RSC_DRV_CMD_OFFSET]		= 24,
	[DRV_SOLVER_CONFIG]		= 0x04,
	[DRV_PRNT_CHLD_CONFIG]		= 0x0C,
	[RSC_DRV_IRQ_ENABLE]		= 0x00,
	[RSC_DRV_IRQ_STATUS]		= 0x04,
	[RSC_DRV_IRQ_CLEAR]		= 0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL]	= 0x20,
	[RSC_DRV_CONTROL]		= 0x24,
	[RSC_DRV_STATUS]		= 0x28,
	[RSC_DRV_CMD_ENABLE]		= 0x2C,
	[RSC_DRV_CMD_MSGID]		= 0x34,
	[RSC_DRV_CMD_ADDR]		= 0x38,
	[RSC_DRV_CMD_DATA]		= 0x3C,
	[RSC_DRV_CMD_STATUS]		= 0x40,
	[RSC_DRV_CMD_RESP_DATA]		= 0x44,
};

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
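/*
 * Worked example (illustrative, derived only from the ver 3.0 offset
 * table above): RSC_DRV_CMD_ADDR for tcs_id = 2, cmd_id = 3 resolves to
 *
 *   tcs_base + 672 * 2 + 0x38 + 24 * 3 = tcs_base + 0x5C0
 *
 * i.e. tcs_reg_addr() selects the TCS block (RSC_DRV_TCS_OFFSET bytes
 * apart) and tcs_cmd_addr() then steps RSC_DRV_CMD_OFFSET bytes per
 * command slot within that block.
 */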
static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value. Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);

	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}
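/*
 * Illustrative call sequence (a sketch, not a new API; the real caller is
 * rpmh_flush() in rpmh.c): the client layer is expected to invalidate
 * first and then re-program the sleep/wake sets, roughly:
 *
 *   rpmh_rsc_invalidate(drv);
 *   rpmh_rsc_write_ctrl_data(drv, sleep_msg);   // once per cached request
 *   rpmh_rsc_write_ctrl_data(drv, wake_msg);
 *
 * where "sleep_msg"/"wake_msg" are placeholders for the requests that the
 * client layer actually caches.
 */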
/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}

/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;
	u32 reg = drv->regs[RSC_DRV_CONTROL];

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, reg, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, reg, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg(drv, reg, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true then enable; if false then disable.
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;
	u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE];

	data = readl_relaxed(drv->tcs_base + reg);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + reg);
}
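/*
 * Worked example (illustrative): __tcs_set_trigger(drv, n, true) on a TCS
 * whose control register reads TCS_AMC_MODE_TRIGGER | TCS_AMC_MODE_ENABLE
 * (0x01010000) issues, in order:
 *
 *   write 0x00010000   - clear TCS_AMC_MODE_TRIGGER (BIT(24))
 *   write 0x00000000   - clear TCS_AMC_MODE_ENABLE (BIT(16))
 *   write 0x00010000   - re-arm AMC mode
 *   write 0x01010000   - trigger the transfer
 *
 * honoring the hardware requirement that the trigger bit be cleared before
 * the enable bit, and that enable be set again before triggering.
 */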
/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i;
	unsigned long irq_status;
	const struct tcs_request *req;

	irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);

	for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
		req = get_req_from_tcs(drv, i);
		if (WARN_ON(!req))
			goto skip;

		trace_rpmh_tx_done(drv, i, req);

		/*
		 * If wake tcs was re-purposed for sending active
		 * votes, clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req);
	}

	return IRQ_HANDLED;
}

/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send, which will contain several addr/data
 *          pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid;
	u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
	u32 cmd_enable = 0;
	struct tcs_cmd *cmd;
	int i, j;

	/* Convert all commands to RR when the request has wait_for_compl set */
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		msgid = cmd_msgid;
		/*
		 * Additionally, if the cmd->wait is set, make the command
		 * response reqd even if the overall request was fire-n-forget.
		 */
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, msg->state, j, msgid, cmd);
	}

	cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
	write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
}
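/*
 * Worked example (illustrative, from the masks defined above): a
 * fire-and-forget write command is programmed with MSGID
 *
 *   CMD_MSGID_LEN | CMD_MSGID_WRITE = 0x8 | 0x10000 = 0x10008
 *
 * while one that needs a completion response (msg->wait_for_compl or
 * cmd->wait set) additionally ORs in CMD_MSGID_RESP_REQ:
 *
 *   0x10008 | 0x100 = 0x10108
 */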
/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use with active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_in_use will ever be updated and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int j, k;
	int i = tcs->offset;

	for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
		curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
					return -EBUSY;
			}
		}
	}

	return 0;
}

/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free or -EBUSY if all in use.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	const struct rsc_drv *drv = tcs->drv;
	unsigned long i;
	unsigned long max = tcs->offset + tcs->num_tcs;

	i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
	if (i >= max)
		return -EBUSY;

	return i;
}

/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 * tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 * or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The h/w does not like it if we send a request to the same address
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

	return find_free_tcs(tcs);
}
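/*
 * Putting the helpers above together, the active-only send path (see
 * rpmh_rsc_send_data() below) is roughly:
 *
 *   tcs = get_tcs_for_msg(drv, msg);            // may borrow a wake TCS
 *   tcs_id = claim_tcs_for_req(drv, tcs, msg);  // under drv->lock
 *   __tcs_buffer_write(drv, tcs_id, 0, msg);    // program cmds, no trigger
 *   __tcs_set_trigger(drv, tcs_id, true);       // kick the transfer
 *   ... tcs_tx_done() fires later and reclaims the TCS ...
 *
 * This is only a reading aid; the exact locking and wait logic live in the
 * function below.
 */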
/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU, this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&drv->lock, flags);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irqrestore(&drv->lock, flags);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}

/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
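/*
 * Worked example (illustrative): for a sleep/wake group with ncpt = 16,
 * find_slots() returning slot = 35 for a 3-command message means:
 *
 *   *tcs_id = 35 / 16 + tcs->offset = 2 + tcs->offset
 *   *cmd_id = 35 % 16              = 3
 *
 * i.e. the commands land in slots 3..5 of the third TCS of the group. The
 * do/while loop above guarantees a message never straddles two TCSes.
 */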
/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);

	return ret;
}

/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller
 *
 * Checks if any of the AMCs are busy handling ACTIVE sets. This is called
 * from the last CPU powering down before flushing the SLEEP and WAKE sets.
 * If the AMCs are busy, the controller cannot enter power collapse, so deny
 * the last CPU's PM notification.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False - AMCs are idle
 * * True  - AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
	unsigned long set;
	const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
	unsigned long max;

	/*
	 * If we made an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then re-purposed wake TCSes
	 * should be checked for not busy, because we used wake TCSes for
	 * active requests in this case.
	 */
	if (!tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	max = tcs->offset + tcs->num_tcs;
	set = find_next_bit(drv->tcs_in_use, max, tcs->offset);

	return set < max;
}

/**
 * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
 * @drv: The controller
 *
 * Writes maximum wakeup cycles when called from suspend.
 * Writes earliest hrtimer wakeup when called from idle.
 */
void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
{
	ktime_t now, wakeup;
	u64 wakeup_us, wakeup_cycles = ~0;
	u32 lo, hi;

	if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
		return;

	/* Set highest time when system (timekeeping) is suspended */
	if (system_state == SYSTEM_SUSPEND)
		goto exit;

	/* Find the earliest hrtimer wakeup from online cpus */
	wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);

	/* Find the relative wakeup in kernel time scale */
	now = ktime_get();
	wakeup = ktime_sub(wakeup, now);
	wakeup_us = ktime_to_us(wakeup);

	/* Convert the wakeup to arch timer scale */
	wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
	wakeup_cycles += arch_timer_read_counter();

exit:
	lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
	hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
	hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
	hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;

	writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO);
	writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI);
}
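/*
 * Worked example (illustrative): a 64-bit wakeup_cycles value of
 * 0x0123456789ABCDEF is split as
 *
 *   lo = 0x89ABCDEF                         (bits 31:0)
 *   hi = (0x01234567 & 0xFFFFFF) = 0x234567 (bits 55:32)
 *      | RSC_DRV_CTL_TCS_DATA_HI_VALID
 *
 * so only 56 bits of the arch counter are programmed; the suspend path
 * (wakeup_cycles = ~0) therefore writes all-ones into both halves plus the
 * valid bit.
 */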
/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v:      Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and the AMCs are not busy, this writes
 * the cached sleep and wake messages to the TCSes. The firmware then takes
 * care of triggering them when entering the deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though. It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}
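/*
 * Worked example (illustrative): on a system with 8 online CPUs, the first
 * 7 CPU_PM_ENTER calls see cpus_in_pm < 8 and return NOTIFY_OK right away.
 * The 8th caller sees cpus_in_pm == 8, takes drv->lock and flushes; if an
 * AMC is still busy it returns NOTIFY_BAD so cpuidle aborts the low power
 * entry for that CPU.
 */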
/**
 * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the genpd notifier block in struct rsc_drv.
 * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON.
 * @v:      Unused
 *
 * This function is given to dev_pm_genpd_add_notifier() so we can be informed
 * about when the cluster power domain is going down. When the cluster goes
 * down we know no more active transfers will be started, so we write the
 * sleep/wake sets. This function gets called from cpuidle code paths and
 * also at system suspend time.
 *
 * If the AMCs are not busy, this writes the cached sleep and wake messages
 * to the TCSes. The firmware then takes care of triggering them when
 * entering the deepest low power modes.
 *
 * Return:
 * * NOTIFY_OK  - success
 * * NOTIFY_BAD - failure
 */
static int rpmh_rsc_pd_callback(struct notifier_block *nfb,
				unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb);

	/* We don't need to lock as genpd on/off are serialized */
	if ((action == GENPD_NOTIFY_PRE_OFF) &&
	    (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)))
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback;
	ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb);
	if (ret)
		pm_runtime_disable(dev);

	return ret;
}

static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = drv->base + offset;

	config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}
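/*
 * Worked example (illustrative): for drv->id = 2, the number of TCSes is
 * taken from DRV_PRNT_CHLD_CONFIG bits [17:12], i.e. the logic above is
 * equivalent to:
 *
 *   max_tcs = (config >> (DRV_NUM_TCS_SHIFT * 2)) & DRV_NUM_TCS_MASK;
 *
 * while ncpt always comes from bits [31:27] via DRV_NCPT_MASK/SHIFT. A
 * hypothetical config value of 0x80A54955 would thus give ncpt = 16 and
 * max_tcs = 0x14.
 */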
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	u32 rsc_id;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	drv->base = devm_platform_ioremap_resource_byname(pdev, drv_id);
	if (IS_ERR(drv->base))
		return PTR_ERR(drv->base);

	rsc_id = readl_relaxed(drv->base + RSC_DRV_ID);
	drv->ver.major = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT);
	drv->ver.major >>= MAJOR_VER_SHIFT;
	drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
	drv->ver.minor >>= MINOR_VER_SHIFT;

	if (drv->ver.major == 3)
		drv->regs = rpmh_rsc_reg_offset_ver_3_0;
	else
		drv->regs = rpmh_rsc_reg_offset_ver_2_7;

	ret = rpmh_probe_tcs_config(pdev, drv);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	init_waitqueue_head(&drv->tcs_wait);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM/genpd notifications are not required for controllers that
	 * support 'HW solver' mode, where the hardware can autonomously
	 * execute low power modes to power down.
	 */
	solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		if (pdev->dev.pm_domain) {
			ret = rpmh_rsc_pd_attach(drv, &pdev->dev);
			if (ret)
				return ret;
		} else {
			drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
			cpu_pm_register_notifier(&drv->rsc_pm);
		}
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);
	drv->dev = &pdev->dev;

	ret = devm_of_platform_populate(&pdev->dev);
	if (ret && pdev->dev.pm_domain) {
		dev_pm_genpd_remove_notifier(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
	}

	return ret;
}

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		  .name = "rpmh",
		  .of_match_table = rpmh_drv_match,
		  .suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
core_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");
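/*
 * Illustrative devicetree consumer (a sketch based on public SDM845 dts
 * files; exact addresses, interrupts, and TCS counts vary per SoC and are
 * not guaranteed here):
 *
 *   apps_rsc: rsc@179c0000 {
 *       compatible = "qcom,rpmh-rsc";
 *       reg = <0x179c0000 0x10000>,
 *             <0x179d0000 0x10000>,
 *             <0x179e0000 0x10000>;
 *       reg-names = "drv-0", "drv-1", "drv-2";
 *       label = "apps_rsc";
 *       qcom,tcs-offset = <0xd00>;
 *       qcom,drv-id = <2>;
 *       qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
 *                         <WAKE_TCS 3>, <CONTROL_TCS 1>;
 *   };
 *
 * rpmh_rsc_probe() maps the "drv-2" region (selected by qcom,drv-id),
 * reads the version and TCS configuration, and then populates the client
 * child nodes beneath this node.
 */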