// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <clocksource/arm_arch_timer.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"


#define RSC_DRV_ID			0

#define MAJOR_VER_MASK			0xFF
#define MAJOR_VER_SHIFT			16
#define MINOR_VER_MASK			0xFF
#define MINOR_VER_SHIFT			8

enum {
	RSC_DRV_TCS_OFFSET,
	RSC_DRV_CMD_OFFSET,
	DRV_SOLVER_CONFIG,
	DRV_PRNT_CHLD_CONFIG,
	RSC_DRV_IRQ_ENABLE,
	RSC_DRV_IRQ_STATUS,
	RSC_DRV_IRQ_CLEAR,
	RSC_DRV_CMD_WAIT_FOR_CMPL,
	RSC_DRV_CONTROL,
	RSC_DRV_STATUS,
	RSC_DRV_CMD_ENABLE,
	RSC_DRV_CMD_MSGID,
	RSC_DRV_CMD_ADDR,
	RSC_DRV_CMD_DATA,
	RSC_DRV_CMD_STATUS,
	RSC_DRV_CMD_RESP_DATA,
};

/* DRV HW Solver Configuration Information Register */
#define DRV_HW_SOLVER_MASK		1
#define DRV_HW_SOLVER_SHIFT		24

/* DRV TCS Configuration Information Register */
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Offsets for CONTROL TCS Registers */
#define RSC_DRV_CTL_TCS_DATA_HI		0x38
#define RSC_DRV_CTL_TCS_DATA_HI_MASK	0xFFFFFF
#define RSC_DRV_CTL_TCS_DATA_HI_VALID	BIT(31)
#define RSC_DRV_CTL_TCS_DATA_LO		0x40
#define RSC_DRV_CTL_TCS_DATA_LO_MASK	0xFFFFFFFF
#define RSC_DRV_CTL_TCS_DATA_SIZE	32

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)
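/*
 * Illustrative values derived from the masks above: a fire-and-forget
 * write command is programmed with CMD_MSGID_LEN | CMD_MSGID_WRITE =
 * 0x10008, and requesting a completion response additionally sets
 * CMD_MSGID_RESP_REQ for 0x10108. See __tcs_buffer_write() for where
 * these bits are composed.
 */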
/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 *  +---------------------------------------------------+
 *  |RSC                                                |
 *  |   ctrl                                            |
 *  |                                                   |
 *  | Drvs:                                             |
 *  | +-----------------------------------------------+ |
 *  | |DRV0                                           | |
 *  | |   ctrl/config                                 | |
 *  | |   IRQ                                         | |
 *  | |                                               | |
 *  | | TCSes:                                        | |
 *  | | +------------------------------------------+ | |
 *  | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | +------------------------------------------+ | |
 *  | | +------------------------------------------+ | |
 *  | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | +------------------------------------------+ | |
 *  | | +------------------------------------------+ | |
 *  | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 *  | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 *  | | +------------------------------------------+ | |
 *  | |                    ......                     | |
 *  | +-----------------------------------------------+ |
 *  | +-----------------------------------------------+ |
 *  | |DRV1                                           | |
 *  | | (same as DRV0)                                | |
 *  | +-----------------------------------------------+ |
 *  |                      ......                       |
 *  +---------------------------------------------------+
 */

#define USECS_TO_CYCLES(time_usecs)			\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

static inline unsigned long xloops_to_cycles(u64 xloops)
{
	return (xloops * loops_per_jiffy * HZ) >> 32;
}

static u32 rpmh_rsc_reg_offset_ver_2_7[] = {
	[RSC_DRV_TCS_OFFSET]		= 672,
	[RSC_DRV_CMD_OFFSET]		= 20,
	[DRV_SOLVER_CONFIG]		= 0x04,
	[DRV_PRNT_CHLD_CONFIG]		= 0x0C,
	[RSC_DRV_IRQ_ENABLE]		= 0x00,
	[RSC_DRV_IRQ_STATUS]		= 0x04,
	[RSC_DRV_IRQ_CLEAR]		= 0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL]	= 0x10,
	[RSC_DRV_CONTROL]		= 0x14,
	[RSC_DRV_STATUS]		= 0x18,
	[RSC_DRV_CMD_ENABLE]		= 0x1C,
	[RSC_DRV_CMD_MSGID]		= 0x30,
	[RSC_DRV_CMD_ADDR]		= 0x34,
	[RSC_DRV_CMD_DATA]		= 0x38,
	[RSC_DRV_CMD_STATUS]		= 0x3C,
	[RSC_DRV_CMD_RESP_DATA]		= 0x40,
};

static u32 rpmh_rsc_reg_offset_ver_3_0[] = {
	[RSC_DRV_TCS_OFFSET]		= 672,
	[RSC_DRV_CMD_OFFSET]		= 24,
	[DRV_SOLVER_CONFIG]		= 0x04,
	[DRV_PRNT_CHLD_CONFIG]		= 0x0C,
	[RSC_DRV_IRQ_ENABLE]		= 0x00,
	[RSC_DRV_IRQ_STATUS]		= 0x04,
	[RSC_DRV_IRQ_CLEAR]		= 0x08,
	[RSC_DRV_CMD_WAIT_FOR_CMPL]	= 0x20,
	[RSC_DRV_CONTROL]		= 0x24,
	[RSC_DRV_STATUS]		= 0x28,
	[RSC_DRV_CMD_ENABLE]		= 0x2C,
	[RSC_DRV_CMD_MSGID]		= 0x34,
	[RSC_DRV_CMD_ADDR]		= 0x38,
	[RSC_DRV_CMD_DATA]		= 0x3C,
	[RSC_DRV_CMD_STATUS]		= 0x40,
	[RSC_DRV_CMD_RESP_DATA]		= 0x44,
};

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
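/*
 * Illustrative address math using the ver 3.0 offset table above: reading
 * RSC_DRV_CMD_ADDR for command 2 of TCS 1 resolves to
 * tcs_base + 672 * 1 + 0x38 + 24 * 2; each TCS is a 672-byte block and
 * each command within it a 24-byte sub-block.
 */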
static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value. Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 * USEC_PER_SEC polls of 1 us each bound the wait at roughly one
	 * second.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv: The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);

	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}
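/*
 * Usage sketch (inferred from the comments elsewhere in this file): the
 * higher-level rpmh_flush() invokes rpmh_rsc_invalidate() to drop stale
 * sleep/wake entries before re-writing them; see the tcs->slots note in
 * rpmh_rsc_send_data() below.
 */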
/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}

/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv: The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv: The controller.
 * @tcs_id: The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;
	u32 reg = drv->regs[RSC_DRV_CONTROL];

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
	enable = read_tcs_reg(drv, reg, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, reg, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, reg, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg(drv, reg, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv: The controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true then enable; if false then disable
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;
	u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE];

	data = readl_relaxed(drv->tcs_base + reg);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + reg);
}
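/*
 * Illustrative register sequence for __tcs_set_trigger() above, starting
 * from a TCS whose previous transfer left RSC_DRV_CONTROL at
 * TCS_AMC_MODE_ENABLE | TCS_AMC_MODE_TRIGGER (0x1010000): the untrigger
 * half writes 0x10000 and then 0x0; with trigger=true the re-arm half
 * then writes 0x10000 followed by 0x1010000 to start the new transfer.
 */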
/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p: Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i;
	unsigned long irq_status;
	const struct tcs_request *req;

	irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);

	for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
		req = get_req_from_tcs(drv, i);
		if (WARN_ON(!req))
			goto skip;

		trace_rpmh_tx_done(drv, i, req);

		/*
		 * If wake tcs was re-purposed for sending active
		 * votes, clear AMC trigger & enable modes and
		 * disable interrupt for this TCS
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req);
	}

	return IRQ_HANDLED;
}

/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv: The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid;
	u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
	u32 cmd_enable = 0;
	struct tcs_cmd *cmd;
	int i, j;

	/* Convert all commands to RR when the request has wait_for_compl set */
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		msgid = cmd_msgid;
		/*
		 * Additionally, if the cmd->wait is set, make the command
		 * response reqd even if the overall request was fire-n-forget.
		 */
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, msg->state, j, msgid, cmd);
	}

	cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
	write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
}
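/*
 * Illustrative example for __tcs_buffer_write() above: a three-command
 * message written starting at cmd_id 4 fills command slots 4..6 and ORs
 * BIT(4) | BIT(5) | BIT(6) = 0x70 into RSC_DRV_CMD_ENABLE, preserving any
 * bits already set by earlier writes to the same TCS.
 */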
/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_in_use will ever be updated and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int j, k;
	int i = tcs->offset;

	for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
		curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
					return -EBUSY;
			}
		}
	}

	return 0;
}

/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free or -EBUSY if all in use.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	const struct rsc_drv *drv = tcs->drv;
	unsigned long i;
	unsigned long max = tcs->offset + tcs->num_tcs;

	i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
	if (i >= max)
		return -EBUSY;

	return i;
}

/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 * tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 * or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The h/w does not like it if we send a request to the same address
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

	return find_free_tcs(tcs);
}
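/*
 * Note on the -EBUSY return above: rpmh_rsc_send_data() below loops on
 * claim_tcs_for_req() via wait_event_lock_irq(), so a busy result simply
 * parks the caller until tcs_tx_done() frees a TCS and wakes
 * drv->tcs_wait.
 */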
/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;

	might_sleep();

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irq(&drv->lock);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
		 */
		write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irq(&drv->lock);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}

/**
 * find_slots() - Find a place to write the given message.
 * @tcs: The tcs group to search.
 * @msg: The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
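/*
 * Illustrative slot math for find_slots() above, assuming ncpt = 16: with
 * slots 0-13 already taken, a three-command message first finds slot 14,
 * but 14 + 3 - 1 crosses the TCS boundary at 16, so the search restarts
 * and lands on slot 16. That maps to TCS index 16 / 16 = 1 (plus
 * tcs->offset) and cmd_id 16 % 16 = 0, so a message never straddles two
 * TCSes.
 */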
/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);

	return ret;
}

/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller
 *
 * Checks if any of the AMCs are busy in handling ACTIVE sets.
 * This is called from the last CPU powering down before flushing
 * SLEEP and WAKE sets. If AMCs are busy, the controller cannot enter
 * power collapse, so deny it from the last CPU's PM notification.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False - AMCs are idle
 * * True - AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
	unsigned long set;
	const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
	unsigned long max;

	/*
	 * If we made an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then re-purposed wake TCSes
	 * should be checked for not busy, because we used wake TCSes for
	 * active requests in this case.
	 */
	if (!tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	max = tcs->offset + tcs->num_tcs;
	set = find_next_bit(drv->tcs_in_use, max, tcs->offset);

	return set < max;
}

/**
 * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
 * @drv: The controller
 *
 * Writes maximum wakeup cycles when called from suspend.
 * Writes earliest hrtimer wakeup when called from idle.
 */
void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
{
	ktime_t now, wakeup;
	u64 wakeup_us, wakeup_cycles = ~0;
	u32 lo, hi;

	if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
		return;

	/* Set highest time when system (timekeeping) is suspended */
	if (system_state == SYSTEM_SUSPEND)
		goto exit;

	/* Find the earliest hrtimer wakeup from online cpus */
	wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);

	/* Find the relative wakeup in kernel time scale */
	now = ktime_get();
	wakeup = ktime_sub(wakeup, now);
	wakeup_us = ktime_to_us(wakeup);

	/* Convert the wakeup to arch timer scale */
	wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
	wakeup_cycles += arch_timer_read_counter();

exit:
	lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
	hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
	hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
	hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;

	writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO);
	writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI);
}
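/*
 * Illustrative conversion for rpmh_rsc_write_next_wakeup() above: 0x10C7
 * is roughly 2^32 / 10^6, so USECS_TO_CYCLES(us) evaluates to
 * us * loops_per_jiffy * HZ / 10^6. Assuming the delay loop is calibrated
 * from the arch timer (typical on these SoCs), that converts microseconds
 * into timer ticks, e.g. 100 us at an assumed 19.2 MHz timer gives ~1920
 * cycles.
 */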
/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb: Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v: Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and AMCs are not busy then it writes
 * cached sleep and wake messages to TCSes. The firmware then takes care of
 * triggering them when entering deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though. It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}
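/*
 * Illustrative walk-through of the counting above: on a system with four
 * online CPUs where three are already idle, the fourth CPU's CPU_PM_ENTER
 * raises cpus_in_pm to 4 == num_online_cpus(), so only that last CPU
 * falls through and attempts the sleep/wake flush.
 */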
/**
 * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy.
 * @nfb: Pointer to the genpd notifier block in struct rsc_drv.
 * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON.
 * @v: Unused
 *
 * This function is given to dev_pm_genpd_add_notifier() so we can be informed
 * about when the cluster power domain is going down. When the cluster goes
 * down we know no more active transfers will be started so we write
 * sleep/wake sets. This function gets called from cpuidle code paths and
 * also at system suspend time.
 *
 * If AMCs are not busy then it writes cached sleep and wake messages to
 * TCSes. The firmware then takes care of triggering them when entering
 * deepest low power modes.
 *
 * Return:
 * * NOTIFY_OK - success
 * * NOTIFY_BAD - failure
 */
static int rpmh_rsc_pd_callback(struct notifier_block *nfb,
				unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb);

	/* We don't need to lock as genpd on/off are serialized */
	if ((action == GENPD_NOTIFY_PRE_OFF) &&
	    (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)))
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback;
	ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb);
	if (ret)
		pm_runtime_disable(dev);

	return ret;
}

static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = drv->base + offset;

	config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}
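/*
 * Illustrative decode of DRV_PRNT_CHLD_CONFIG as read above: a 6-bit TCS
 * count is packed per DRV at bit position 6 * drv->id and a 5-bit
 * num-cmds-per-TCS field sits at bit 27, so for drv->id = 2 this computes
 * max_tcs = (config >> 12) & 0x3F and ncpt = (config >> 27) & 0x1F.
 */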
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	u32 rsc_id;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	drv->base = devm_platform_ioremap_resource_byname(pdev, drv_id);
	if (IS_ERR(drv->base))
		return PTR_ERR(drv->base);

	rsc_id = readl_relaxed(drv->base + RSC_DRV_ID);
	drv->ver.major = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT);
	drv->ver.major >>= MAJOR_VER_SHIFT;
	drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
	drv->ver.minor >>= MINOR_VER_SHIFT;

	if (drv->ver.major == 3)
		drv->regs = rpmh_rsc_reg_offset_ver_3_0;
	else
		drv->regs = rpmh_rsc_reg_offset_ver_2_7;

	ret = rpmh_probe_tcs_config(pdev, drv);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	init_waitqueue_head(&drv->tcs_wait);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM/genpd notifications are not required for controllers that
	 * support 'HW solver' mode, where they can autonomously execute low
	 * power modes and power down.
	 */
	solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		if (pdev->dev.pm_domain) {
			ret = rpmh_rsc_pd_attach(drv, &pdev->dev);
			if (ret)
				return ret;
		} else {
			drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
			cpu_pm_register_notifier(&drv->rsc_pm);
		}
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);
	drv->dev = &pdev->dev;

	ret = devm_of_platform_populate(&pdev->dev);
	if (ret && pdev->dev.pm_domain) {
		dev_pm_genpd_remove_notifier(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
	}

	return ret;
}

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		.name = "rpmh",
		.of_match_table = rpmh_drv_match,
		.suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
core_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");