// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <asm/div64.h>
#include <linux/interconnect-provider.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#include "bcm-voter.h"
#include "icc-rpmh.h"

static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);

/**
 * struct bcm_voter - Bus Clock Manager voter
 * @dev: reference to the device that communicates with the BCM
 * @np: reference to the device node to match bcm voters
 * @lock: mutex to protect commit and wake/sleep lists in the voter
 * @commit_list: list containing bcms to be committed to hardware
 * @ws_list: list containing bcms that have different wake/sleep votes
 * @voter_node: list of bcm voters
 * @tcs_wait: mask for which buckets require TCS completion
 */
struct bcm_voter {
	struct device *dev;
	struct device_node *np;
	struct mutex lock;
	struct list_head commit_list;
	struct list_head ws_list;
	struct list_head voter_node;
	u32 tcs_wait;
};

static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b)
{
	const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list);
	const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list);

	return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd;
}

static u64 bcm_div(u64 num, u32 base)
{
	/* Ensure that small votes aren't lost. */
	if (num && num < base)
		return 1;

	do_div(num, base);

	return num;
}

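/*
 * Worked example for bcm_aggregate() below, using purely hypothetical
 * values: a node with sum_avg = 800, buswidth = 4 and channels = 2 on a
 * BCM with aux_data.width = 16 contributes 800 * 16 / (4 * 2) = 1600 to
 * agg_avg; with vote_scale = 1000 and aux_data.unit = 800, that becomes
 * vote_x = 1600 * 1000 / 800 = 2000. The max() across all nodes on the
 * BCM decides the aggregate for each bucket, and bcm_div() keeps any
 * non-zero intermediate result at a minimum of 1 so small votes are not
 * lost.
 */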
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
	struct qcom_icc_node *node;
	size_t i, bucket;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 temp;

	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
		for (i = 0; i < bcm->num_nodes; i++) {
			node = bcm->nodes[i];
			temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
				       node->buswidth * node->channels);
			agg_avg[bucket] = max(agg_avg[bucket], temp);

			temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
				       node->buswidth);
			agg_peak[bucket] = max(agg_peak[bucket], temp);
		}

		temp = agg_avg[bucket] * bcm->vote_scale;
		bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);

		temp = agg_peak[bucket] * bcm->vote_scale;
		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);

		if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
			bcm->vote_x[bucket] = 0;
			bcm->vote_y[bucket] = bcm->enable_mask;
		}
	}

	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
	}
}

static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
			       u32 addr, bool commit, bool wait)
{
	bool valid = true;

	if (!cmd)
		return;

	memset(cmd, 0, sizeof(*cmd));

	if (vote_x == 0 && vote_y == 0)
		valid = false;

	if (vote_x > BCM_TCS_CMD_VOTE_MASK)
		vote_x = BCM_TCS_CMD_VOTE_MASK;

	if (vote_y > BCM_TCS_CMD_VOTE_MASK)
		vote_y = BCM_TCS_CMD_VOTE_MASK;

	cmd->addr = addr;
	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

	/*
	 * Set the wait-for-completion flag on commands that need to complete
	 * before the next command is sent.
	 */
	cmd->wait = wait;
}

static void tcs_list_gen(struct bcm_voter *voter, int bucket,
			 struct tcs_cmd tcs_list[MAX_VCD],
			 int n[MAX_VCD + 1])
{
	struct list_head *bcm_list = &voter->commit_list;
	struct qcom_icc_bcm *bcm;
	bool commit, wait;
	size_t idx = 0, batch = 0, cur_vcd_size = 0;

	memset(n, 0, sizeof(int) * (MAX_VCD + 1));

	list_for_each_entry(bcm, bcm_list, list) {
		commit = false;
		cur_vcd_size++;
		if ((list_is_last(&bcm->list, bcm_list)) ||
		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
			commit = true;
			cur_vcd_size = 0;
		}

		wait = commit && (voter->tcs_wait & BIT(bucket));

		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
			    bcm->vote_y[bucket], bcm->addr, commit, wait);
		idx++;
		n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them in
		 * multiple payloads when they are under the same VCD. This is
		 * to ensure that every BCM is committed, since we only set the
		 * commit bit on the last BCM request of every VCD.
		 */
		if (n[batch] >= MAX_RPMH_PAYLOAD) {
			if (!commit) {
				n[batch] -= cur_vcd_size;
				n[batch + 1] = cur_vcd_size;
			}
			batch++;
		}
	}
}

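/*
 * For reference, consumers look up their voter(s) through the DT properties
 * parsed by of_bcm_voter_get() below. A minimal sketch of such a binding,
 * with purely illustrative node names and labels:
 *
 *	apps_bcm_voter: bcm-voter {
 *		compatible = "qcom,bcm-voter";
 *	};
 *
 *	consumer@0 {
 *		...
 *		qcom,bcm-voters = <&apps_bcm_voter>;
 *		qcom,bcm-voter-names = "apps";
 *	};
 */
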
/**
 * of_bcm_voter_get - gets a bcm voter handle from DT node
 * @dev: device pointer for the consumer device
 * @name: name for the bcm voter device
 *
 * This function will match a device_node pointer for the phandle
 * specified in the device DT and return a bcm_voter handle on success.
 *
 * Returns bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is returned
 * when the matching bcm voter is yet to be found.
 */
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
{
	struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
	struct bcm_voter *temp;
	struct device_node *np, *node;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	if (name) {
		idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	node = of_parse_phandle(np, "qcom,bcm-voters", idx);

	mutex_lock(&bcm_voter_lock);
	list_for_each_entry(temp, &bcm_voters, voter_node) {
		if (temp->np == node) {
			voter = temp;
			break;
		}
	}
	mutex_unlock(&bcm_voter_lock);

	of_node_put(node);
	return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);

/**
 * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
 * @voter: voter that the bcms are being added to
 * @bcm: bcm to add to the commit and wake/sleep list
 */
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
	if (!voter)
		return;

	mutex_lock(&voter->lock);
	if (list_empty(&bcm->list))
		list_add_tail(&bcm->list, &voter->commit_list);

	if (list_empty(&bcm->ws_list))
		list_add_tail(&bcm->ws_list, &voter->ws_list);

	mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);

/**
 * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
 * @voter: voter that needs flushing
 *
 * This function generates a set of AMC commands and flushes them to the BCM
 * device associated with the voter. It conditionally generates WAKE and SLEEP
 * commands based on deltas between the WAKE and SLEEP requirements. The
 * ws_list persists through multiple commit requests, and bcm nodes are removed
 * from it only when the WAKE requirements match the SLEEP requirements.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
	struct qcom_icc_bcm *bcm;
	struct qcom_icc_bcm *bcm_tmp;
	int commit_idx[MAX_VCD + 1];
	struct tcs_cmd cmds[MAX_BCMS];
	int ret = 0;

	if (!voter)
		return 0;

	mutex_lock(&voter->lock);
	list_for_each_entry(bcm, &voter->commit_list, list)
		bcm_aggregate(bcm);

	/*
	 * Pre-sort the BCMs based on VCD for ease of generating a command list
	 * that groups the BCMs with the same VCD together. VCDs are numbered
	 * with the lowest being the most expensive time-wise, ensuring that
	 * those commands are sent earliest in the queue. This needs to be
	 * sorted on every commit since we can't guarantee the order in which
	 * the BCMs are added to the list.
	 */
	list_sort(NULL, &voter->commit_list, cmp_vcd);

	/*
	 * Construct the command list based on the pre-ordered list of BCMs
	 * grouped by VCD.
	 */
	tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
	if (!commit_idx[0])
		goto out;

	rpmh_invalidate(voter->dev);

	ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
			       cmds, commit_idx);
	if (ret) {
		pr_err("Error sending AMC RPMH requests (%d)\n", ret);
		goto out;
	}

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
		/*
		 * Only generate WAKE and SLEEP commands if a resource's
		 * requirements change as the execution environment transitions
		 * between different power states.
		 */
		if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
		    bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
			list_add_tail(&bcm->list, &voter->commit_list);
		else
			list_del_init(&bcm->ws_list);
	}

	if (list_empty(&voter->commit_list))
		goto out;

	list_sort(NULL, &voter->commit_list, cmp_vcd);

	tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
		goto out;
	}

	tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
		goto out;
	}

out:
	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	mutex_unlock(&voter->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);

static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct bcm_voter *voter;

	voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
	if (!voter)
		return -ENOMEM;

	voter->dev = &pdev->dev;
	voter->np = np;

	if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
		voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;

	mutex_init(&voter->lock);
	INIT_LIST_HEAD(&voter->commit_list);
	INIT_LIST_HEAD(&voter->ws_list);

	mutex_lock(&bcm_voter_lock);
	list_add_tail(&voter->voter_node, &bcm_voters);
	mutex_unlock(&bcm_voter_lock);

	return 0;
}

static const struct of_device_id bcm_voter_of_match[] = {
	{ .compatible = "qcom,bcm-voter" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm_voter_of_match);

static struct platform_driver qcom_icc_bcm_voter_driver = {
	.probe = qcom_icc_bcm_voter_probe,
	.driver = {
		.name = "bcm_voter",
		.of_match_table = bcm_voter_of_match,
	},
};
module_platform_driver(qcom_icc_bcm_voter_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");
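
/*
 * Illustrative consumer flow (a sketch, not taken from any specific provider
 * driver): a consumer typically obtains its voter once at probe time and then
 * queues and flushes BCM votes whenever its aggregated bandwidth changes.
 *
 *	voter = of_bcm_voter_get(dev, NULL);
 *	if (IS_ERR(voter))
 *		return PTR_ERR(voter);
 *	...
 *	qcom_icc_bcm_voter_add(voter, bcm);
 *	ret = qcom_icc_bcm_voter_commit(voter);
 */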