1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Qualcomm ICE (Inline Crypto Engine) support. 4 * 5 * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. 6 * Copyright (c) 2019, Google LLC 7 * Copyright (c) 2023, Linaro Limited 8 */ 9 10 #include <linux/bitfield.h> 11 #include <linux/cleanup.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/iopoll.h> 15 #include <linux/of.h> 16 #include <linux/of_platform.h> 17 #include <linux/platform_device.h> 18 19 #include <linux/firmware/qcom/qcom_scm.h> 20 21 #include <soc/qcom/ice.h> 22 23 #define AES_256_XTS_KEY_SIZE 64 24 25 /* QCOM ICE registers */ 26 #define QCOM_ICE_REG_VERSION 0x0008 27 #define QCOM_ICE_REG_FUSE_SETTING 0x0010 28 #define QCOM_ICE_REG_BIST_STATUS 0x0070 29 #define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 30 31 /* BIST ("built-in self-test") status flags */ 32 #define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) 33 34 #define QCOM_ICE_FUSE_SETTING_MASK 0x1 35 #define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 36 #define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 37 38 #define qcom_ice_writel(engine, val, reg) \ 39 writel((val), (engine)->base + (reg)) 40 41 #define qcom_ice_readl(engine, reg) \ 42 readl((engine)->base + (reg)) 43 44 struct qcom_ice { 45 struct device *dev; 46 void __iomem *base; 47 48 struct clk *core_clk; 49 }; 50 51 static bool qcom_ice_check_supported(struct qcom_ice *ice) 52 { 53 u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION); 54 struct device *dev = ice->dev; 55 int major = FIELD_GET(GENMASK(31, 24), regval); 56 int minor = FIELD_GET(GENMASK(23, 16), regval); 57 int step = FIELD_GET(GENMASK(15, 0), regval); 58 59 /* For now this driver only supports ICE version 3 and 4. 
*/ 60 if (major != 3 && major != 4) { 61 dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", 62 major, minor, step); 63 return false; 64 } 65 66 dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", 67 major, minor, step); 68 69 /* If fuses are blown, ICE might not work in the standard way. */ 70 regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING); 71 if (regval & (QCOM_ICE_FUSE_SETTING_MASK | 72 QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | 73 QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { 74 dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); 75 return false; 76 } 77 78 return true; 79 } 80 81 static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice) 82 { 83 u32 regval; 84 85 regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); 86 87 /* Enable low power mode sequence */ 88 regval |= 0x7000; 89 qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); 90 } 91 92 static void qcom_ice_optimization_enable(struct qcom_ice *ice) 93 { 94 u32 regval; 95 96 /* ICE Optimizations Enable Sequence */ 97 regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); 98 regval |= 0xd807100; 99 /* ICE HPG requires delay before writing */ 100 udelay(5); 101 qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); 102 udelay(5); 103 } 104 105 /* 106 * Wait until the ICE BIST (built-in self-test) has completed. 107 * 108 * This may be necessary before ICE can be used. 109 * Note that we don't really care whether the BIST passed or failed; 110 * we really just want to make sure that it isn't still running. This is 111 * because (a) the BIST is a FIPS compliance thing that never fails in 112 * practice, (b) ICE is documented to reject crypto requests if the BIST 113 * fails, so we needn't do it in software too, and (c) properly testing 114 * storage encryption requires testing the full storage stack anyway, 115 * and not relying on hardware-level self-tests. 
116 */ 117 static int qcom_ice_wait_bist_status(struct qcom_ice *ice) 118 { 119 u32 regval; 120 int err; 121 122 err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS, 123 regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), 124 50, 5000); 125 if (err) 126 dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n"); 127 128 return err; 129 } 130 131 int qcom_ice_enable(struct qcom_ice *ice) 132 { 133 qcom_ice_low_power_mode_enable(ice); 134 qcom_ice_optimization_enable(ice); 135 136 return qcom_ice_wait_bist_status(ice); 137 } 138 EXPORT_SYMBOL_GPL(qcom_ice_enable); 139 140 int qcom_ice_resume(struct qcom_ice *ice) 141 { 142 struct device *dev = ice->dev; 143 int err; 144 145 err = clk_prepare_enable(ice->core_clk); 146 if (err) { 147 dev_err(dev, "failed to enable core clock (%d)\n", 148 err); 149 return err; 150 } 151 152 return qcom_ice_wait_bist_status(ice); 153 } 154 EXPORT_SYMBOL_GPL(qcom_ice_resume); 155 156 int qcom_ice_suspend(struct qcom_ice *ice) 157 { 158 clk_disable_unprepare(ice->core_clk); 159 160 return 0; 161 } 162 EXPORT_SYMBOL_GPL(qcom_ice_suspend); 163 164 int qcom_ice_program_key(struct qcom_ice *ice, 165 u8 algorithm_id, u8 key_size, 166 const u8 crypto_key[], u8 data_unit_size, 167 int slot) 168 { 169 struct device *dev = ice->dev; 170 union { 171 u8 bytes[AES_256_XTS_KEY_SIZE]; 172 u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; 173 } key; 174 int i; 175 int err; 176 177 /* Only AES-256-XTS has been tested so far. 
*/ 178 if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS || 179 key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) { 180 dev_err_ratelimited(dev, 181 "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", 182 algorithm_id, key_size); 183 return -EINVAL; 184 } 185 186 memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE); 187 188 /* The SCM call requires that the key words are encoded in big endian */ 189 for (i = 0; i < ARRAY_SIZE(key.words); i++) 190 __cpu_to_be32s(&key.words[i]); 191 192 err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, 193 QCOM_SCM_ICE_CIPHER_AES_256_XTS, 194 data_unit_size); 195 196 memzero_explicit(&key, sizeof(key)); 197 198 return err; 199 } 200 EXPORT_SYMBOL_GPL(qcom_ice_program_key); 201 202 int qcom_ice_evict_key(struct qcom_ice *ice, int slot) 203 { 204 return qcom_scm_ice_invalidate_key(slot); 205 } 206 EXPORT_SYMBOL_GPL(qcom_ice_evict_key); 207 208 static struct qcom_ice *qcom_ice_create(struct device *dev, 209 void __iomem *base) 210 { 211 struct qcom_ice *engine; 212 213 if (!qcom_scm_is_available()) 214 return ERR_PTR(-EPROBE_DEFER); 215 216 if (!qcom_scm_ice_available()) { 217 dev_warn(dev, "ICE SCM interface not found\n"); 218 return NULL; 219 } 220 221 engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL); 222 if (!engine) 223 return ERR_PTR(-ENOMEM); 224 225 engine->dev = dev; 226 engine->base = base; 227 228 /* 229 * Legacy DT binding uses different clk names for each consumer, 230 * so lets try those first. If none of those are a match, it means 231 * the we only have one clock and it is part of the dedicated DT node. 232 * Also, enable the clock before we check what HW version the driver 233 * supports. 
234 */ 235 engine->core_clk = devm_clk_get_optional_enabled(dev, "ice_core_clk"); 236 if (!engine->core_clk) 237 engine->core_clk = devm_clk_get_optional_enabled(dev, "ice"); 238 if (!engine->core_clk) 239 engine->core_clk = devm_clk_get_enabled(dev, NULL); 240 if (IS_ERR(engine->core_clk)) 241 return ERR_CAST(engine->core_clk); 242 243 if (!qcom_ice_check_supported(engine)) 244 return ERR_PTR(-EOPNOTSUPP); 245 246 dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n"); 247 248 return engine; 249 } 250 251 /** 252 * of_qcom_ice_get() - get an ICE instance from a DT node 253 * @dev: device pointer for the consumer device 254 * 255 * This function will provide an ICE instance either by creating one for the 256 * consumer device if its DT node provides the 'ice' reg range and the 'ice' 257 * clock (for legacy DT style). On the other hand, if consumer provides a 258 * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already 259 * be created and so this function will return that instead. 260 * 261 * Return: ICE pointer on success, NULL if there is no ICE data provided by the 262 * consumer or ERR_PTR() on error. 263 */ 264 struct qcom_ice *of_qcom_ice_get(struct device *dev) 265 { 266 struct platform_device *pdev = to_platform_device(dev); 267 struct qcom_ice *ice; 268 struct resource *res; 269 void __iomem *base; 270 struct device_link *link; 271 272 if (!dev || !dev->of_node) 273 return ERR_PTR(-ENODEV); 274 275 /* 276 * In order to support legacy style devicetree bindings, we need 277 * to create the ICE instance using the consumer device and the reg 278 * range called 'ice' it provides. 
279 */ 280 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"); 281 if (res) { 282 base = devm_ioremap_resource(&pdev->dev, res); 283 if (IS_ERR(base)) 284 return ERR_CAST(base); 285 286 /* create ICE instance using consumer dev */ 287 return qcom_ice_create(&pdev->dev, base); 288 } 289 290 /* 291 * If the consumer node does not provider an 'ice' reg range 292 * (legacy DT binding), then it must at least provide a phandle 293 * to the ICE devicetree node, otherwise ICE is not supported. 294 */ 295 struct device_node *node __free(device_node) = of_parse_phandle(dev->of_node, 296 "qcom,ice", 0); 297 if (!node) 298 return NULL; 299 300 pdev = of_find_device_by_node(node); 301 if (!pdev) { 302 dev_err(dev, "Cannot find device node %s\n", node->name); 303 return ERR_PTR(-EPROBE_DEFER); 304 } 305 306 ice = platform_get_drvdata(pdev); 307 if (!ice) { 308 dev_err(dev, "Cannot get ice instance from %s\n", 309 dev_name(&pdev->dev)); 310 platform_device_put(pdev); 311 return ERR_PTR(-EPROBE_DEFER); 312 } 313 314 link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); 315 if (!link) { 316 dev_err(&pdev->dev, 317 "Failed to create device link to consumer %s\n", 318 dev_name(dev)); 319 platform_device_put(pdev); 320 ice = ERR_PTR(-EINVAL); 321 } 322 323 return ice; 324 } 325 EXPORT_SYMBOL_GPL(of_qcom_ice_get); 326 327 static int qcom_ice_probe(struct platform_device *pdev) 328 { 329 struct qcom_ice *engine; 330 void __iomem *base; 331 332 base = devm_platform_ioremap_resource(pdev, 0); 333 if (IS_ERR(base)) { 334 dev_warn(&pdev->dev, "ICE registers not found\n"); 335 return PTR_ERR(base); 336 } 337 338 engine = qcom_ice_create(&pdev->dev, base); 339 if (IS_ERR(engine)) 340 return PTR_ERR(engine); 341 342 platform_set_drvdata(pdev, engine); 343 344 return 0; 345 } 346 347 static const struct of_device_id qcom_ice_of_match_table[] = { 348 { .compatible = "qcom,inline-crypto-engine" }, 349 { }, 350 }; 351 MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table); 

/*
 * Platform driver for the dedicated ICE DT node. There is no remove
 * callback: the MMIO mapping, clock, and engine allocation are all
 * devm-managed in the probe path.
 */
static struct platform_driver qcom_ice_driver = {
	.probe = qcom_ice_probe,
	.driver = {
		.name = "qcom-ice",
		.of_match_table = qcom_ice_of_match_table,
	},
};

module_platform_driver(qcom_ice_driver);

MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver");
MODULE_LICENSE("GPL");