// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm ICE (Inline Crypto Engine) support.
 *
 * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2019, Google LLC
 * Copyright (c) 2023, Linaro Limited
 */

#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <linux/firmware/qcom/qcom_scm.h>

#include <soc/qcom/ice.h>

#define AES_256_XTS_KEY_SIZE			64	/* for raw keys only */
#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE		100	/* assuming HWKM v2 */

/* QCOM ICE registers */

#define QCOM_ICE_REG_CONTROL			0x0000
#define QCOM_ICE_LEGACY_MODE_ENABLED		BIT(0)

#define QCOM_ICE_REG_VERSION			0x0008

#define QCOM_ICE_REG_FUSE_SETTING		0x0010
#define QCOM_ICE_FUSE_SETTING_MASK		BIT(0)
#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK	BIT(1)
#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK	BIT(2)

#define QCOM_ICE_REG_BIST_STATUS		0x0070
#define QCOM_ICE_BIST_STATUS_MASK		GENMASK(31, 28)

#define QCOM_ICE_REG_ADVANCED_CONTROL		0x1000

#define QCOM_ICE_REG_CRYPTOCFG_BASE		0x4040
#define QCOM_ICE_REG_CRYPTOCFG_SIZE		0x80
#define QCOM_ICE_REG_CRYPTOCFG(slot)		(QCOM_ICE_REG_CRYPTOCFG_BASE + \
						 QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot))
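
/*
 * Layout of the per-keyslot QCOM_ICE_REG_CRYPTOCFG(slot) register as this
 * driver programs it: the data unit size in 512-byte units, the cipher
 * capability index, and the CFGE bit, which is cleared while a key is being
 * programmed and set once the key is in place (see
 * qcom_ice_program_wrapped_key()).
 */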
union crypto_cfg {
	__le32 regval;
	struct {
		u8 dusize;
		u8 capidx;
		u8 reserved;
#define QCOM_ICE_HWKM_CFG_ENABLE_VAL		BIT(7)
		u8 cfge;
	};
};

/* QCOM ICE HWKM (Hardware Key Manager) registers */

#define HWKM_OFFSET 0x8000

#define QCOM_ICE_REG_HWKM_TZ_KM_CTL		(HWKM_OFFSET + 0x1000)
#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL	(BIT(1) | BIT(2))

#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS		(HWKM_OFFSET + 0x1004)
#define QCOM_ICE_HWKM_KT_CLEAR_DONE		BIT(0)
#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE	BIT(1)
#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE	BIT(2)
#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2	BIT(7)
#define QCOM_ICE_HWKM_BIST_DONE_V2		BIT(9)

#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008)
#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL	BIT(3)

#define QCOM_ICE_REG_HWKM_BANK0_BBAC_0		(HWKM_OFFSET + 0x5000)
#define QCOM_ICE_REG_HWKM_BANK0_BBAC_1		(HWKM_OFFSET + 0x5004)
#define QCOM_ICE_REG_HWKM_BANK0_BBAC_2		(HWKM_OFFSET + 0x5008)
#define QCOM_ICE_REG_HWKM_BANK0_BBAC_3		(HWKM_OFFSET + 0x500C)
#define QCOM_ICE_REG_HWKM_BANK0_BBAC_4		(HWKM_OFFSET + 0x5010)

#define qcom_ice_writel(engine, val, reg)	\
	writel((val), (engine)->base + (reg))

#define qcom_ice_readl(engine, reg)	\
	readl((engine)->base + (reg))

static bool qcom_ice_use_wrapped_keys;
module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660);
MODULE_PARM_DESC(use_wrapped_keys,
		 "Support wrapped keys instead of raw keys, if available on the platform");

struct qcom_ice {
	struct device *dev;
	void __iomem *base;

	struct clk *core_clk;
	bool use_hwkm;
	bool hwkm_init_complete;
};

static bool qcom_ice_check_supported(struct qcom_ice *ice)
{
	u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION);
	struct device *dev = ice->dev;
	int major = FIELD_GET(GENMASK(31, 24), regval);
	int minor = FIELD_GET(GENMASK(23, 16), regval);
	int step = FIELD_GET(GENMASK(15, 0), regval);

	/* For now this driver only supports ICE versions 3 and 4. */
	if (major != 3 && major != 4) {
		dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
			 major, minor, step);
		return false;
	}

	dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
		 major, minor, step);

	/* If fuses are blown, ICE might not work in the standard way. */
	regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
	if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
		      QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
		      QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
		dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
		return false;
	}

	/*
	 * Check for HWKM support and decide whether to use it or not.  ICE
	 * v3.2.1 and later have HWKM v2.  ICE v3.2.0 has HWKM v1.  Earlier ICE
	 * versions don't have HWKM at all.  However, for HWKM to be fully
	 * usable by Linux, the TrustZone software also needs to support
	 * certain SCM calls, including the ones to generate and prepare keys.
	 * That effectively makes the earliest supported SoC the SM8650, which
	 * has HWKM v2.  Therefore, this driver doesn't include support for
	 * HWKM v1, and it checks for the SCM call support before it decides
	 * to use HWKM.
	 *
	 * Also, since HWKM and legacy mode are mutually exclusive, and
	 * ICE-capable storage driver(s) need to know early on whether to
	 * advertise support for raw keys or wrapped keys, HWKM cannot be used
	 * unconditionally.  A module parameter is used to opt into using it.
	 */
	if ((major >= 4 ||
	     (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) &&
	    qcom_scm_has_wrapped_key_support()) {
		if (qcom_ice_use_wrapped_keys) {
			dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n");
			ice->use_hwkm = true;
		} else {
			dev_info(dev, "Not using HWKM. Supporting raw keys only.\n");
		}
	} else if (qcom_ice_use_wrapped_keys) {
		dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n");
	} else {
		dev_info(dev, "A supported HWKM is not present. Supporting raw keys only.\n");
	}
	return true;
}

static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice)
{
	u32 regval;

	regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);

	/* Enable low power mode sequence */
	regval |= 0x7000;
	qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
}

static void qcom_ice_optimization_enable(struct qcom_ice *ice)
{
	u32 regval;

	/* ICE Optimizations Enable Sequence */
	regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
	regval |= 0xd807100;
	/* ICE HPG requires delay before writing */
	udelay(5);
	qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
	udelay(5);
}

/*
 * Wait until the ICE BIST (built-in self-test) has completed.
 *
 * This may be necessary before ICE can be used.
 * Note that we don't really care whether the BIST passed or failed;
 * we really just want to make sure that it isn't still running.  This is
 * because (a) the BIST is a FIPS compliance thing that never fails in
 * practice, (b) ICE is documented to reject crypto requests if the BIST
 * fails, so we needn't do it in software too, and (c) properly testing
 * storage encryption requires testing the full storage stack anyway,
 * and not relying on hardware-level self-tests.
 */
static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
{
	u32 regval;
	int err;

	err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
				 regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
				 50, 5000);
	if (err) {
		dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
		return err;
	}

	if (ice->use_hwkm &&
	    qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) !=
	    (QCOM_ICE_HWKM_KT_CLEAR_DONE |
	     QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE |
	     QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE |
	     QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 |
	     QCOM_ICE_HWKM_BIST_DONE_V2)) {
		dev_err(ice->dev, "HWKM self-test error!\n");
		/*
		 * Too late to revoke use_hwkm here, as it was already
		 * propagated up the stack into the crypto capabilities.
		 */
	}
	return 0;
}

static void qcom_ice_hwkm_init(struct qcom_ice *ice)
{
	u32 regval;

	if (!ice->use_hwkm)
		return;

	BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE >
		     BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE);
	/*
	 * When ICE is in HWKM mode, it only supports wrapped keys.
	 * When ICE is in legacy mode, it only supports raw keys.
	 *
	 * Put ICE in HWKM mode.  ICE defaults to legacy mode.
	 */
	regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
	regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
	qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);

	/* Disable CRC checks.  This HWKM feature is not used. */
	qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL,
			QCOM_ICE_REG_HWKM_TZ_KM_CTL);

	/*
	 * Allow the HWKM slave to read and write the keyslots in the ICE HWKM
	 * slave.  Without this, TrustZone cannot program keys into ICE.
	 */
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4);

	/* Clear the HWKM response FIFO. */
	qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL,
			QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS);
	ice->hwkm_init_complete = true;
}

int qcom_ice_enable(struct qcom_ice *ice)
{
	qcom_ice_low_power_mode_enable(ice);
	qcom_ice_optimization_enable(ice);
	qcom_ice_hwkm_init(ice);
	return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);

int qcom_ice_resume(struct qcom_ice *ice)
{
	struct device *dev = ice->dev;
	int err;

	err = clk_prepare_enable(ice->core_clk);
	if (err) {
		dev_err(dev, "failed to enable core clock (%d)\n", err);
		return err;
	}
	qcom_ice_hwkm_init(ice);
	return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_resume);

int qcom_ice_suspend(struct qcom_ice *ice)
{
	clk_disable_unprepare(ice->core_clk);
	ice->hwkm_init_complete = false;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_ice_suspend);

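/*
 * When HWKM is in use, the SCM interface addresses keyslots at twice the ICE
 * slot index, so translate the slot number before making the SCM call.
 */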
static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot)
{
	return slot * 2;
}

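/*
 * Note on dusize below: blk-crypto expresses the data unit size in bytes,
 * while ICE expects it in 512-byte units, so a 4096-byte data unit, for
 * example, is programmed as 8.
 */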
static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot,
					const struct blk_crypto_key *bkey)
{
	struct device *dev = ice->dev;
	union crypto_cfg cfg = {
		.dusize = bkey->crypto_cfg.data_unit_size / 512,
		.capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS,
		.cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL,
	};
	int err;

	if (!ice->use_hwkm) {
		dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n");
		return -EINVAL;
	}
	if (!ice->hwkm_init_complete) {
		dev_err_ratelimited(dev, "HWKM not yet initialized\n");
		return -EINVAL;
	}

	/* Clear CFGE before programming the key. */
	qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot));

	/* Call into TrustZone to program the wrapped key using HWKM. */
	err = qcom_scm_ice_set_key(translate_hwkm_slot(ice, slot), bkey->bytes,
				   bkey->size, cfg.capidx, cfg.dusize);
	if (err) {
		dev_err_ratelimited(dev,
				    "qcom_scm_ice_set_key failed; err=%d, slot=%u\n",
				    err, slot);
		return err;
	}

	/* Set CFGE after programming the key. */
	qcom_ice_writel(ice, le32_to_cpu(cfg.regval),
			QCOM_ICE_REG_CRYPTOCFG(slot));
	return 0;
}

int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot,
			 const struct blk_crypto_key *blk_key)
{
	struct device *dev = ice->dev;
	union {
		u8 bytes[AES_256_XTS_KEY_SIZE];
		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
	} key;
	int i;
	int err;

	/* Only AES-256-XTS has been tested so far. */
	if (blk_key->crypto_cfg.crypto_mode !=
	    BLK_ENCRYPTION_MODE_AES_256_XTS) {
		dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n",
				    blk_key->crypto_cfg.crypto_mode);
		return -EINVAL;
	}

	if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
		return qcom_ice_program_wrapped_key(ice, slot, blk_key);

	if (ice->use_hwkm) {
		dev_err_ratelimited(dev, "Got raw key when using HWKM\n");
		return -EINVAL;
	}

	if (blk_key->size != AES_256_XTS_KEY_SIZE) {
		dev_err_ratelimited(dev, "Incorrect key size\n");
		return -EINVAL;
	}
	memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE);

	/* The SCM call requires that the key words are encoded in big endian */
	for (i = 0; i < ARRAY_SIZE(key.words); i++)
		__cpu_to_be32s(&key.words[i]);

	err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				   blk_key->crypto_cfg.data_unit_size / 512);

	memzero_explicit(&key, sizeof(key));

	return err;
}
EXPORT_SYMBOL_GPL(qcom_ice_program_key);

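/*
 * Note: if HWKM init has completed, the keyslot is translated the same way as
 * in qcom_ice_program_wrapped_key(), so the SCM call invalidates the slot
 * that the wrapped key was actually programmed into.
 */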
int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
{
	if (ice->hwkm_init_complete)
		slot = translate_hwkm_slot(ice, slot);
	return qcom_scm_ice_invalidate_key(slot);
}
EXPORT_SYMBOL_GPL(qcom_ice_evict_key);

/**
 * qcom_ice_get_supported_key_type() - Get the supported key type
 * @ice: ICE driver data
 *
 * Return: the blk-crypto key type that the ICE driver is configured to use.
 * This is the key type that ICE-capable storage drivers should advertise as
 * supported in the crypto capabilities of any disks they register.
 */
enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice)
{
	if (ice->use_hwkm)
		return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
	return BLK_CRYPTO_KEY_TYPE_RAW;
}
EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type);

/**
 * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key
 * @ice: ICE driver data
 * @eph_key: an ephemerally-wrapped key
 * @eph_key_size: size of @eph_key in bytes
 * @sw_secret: output buffer for the software secret
 *
 * Use HWKM to derive the "software secret" from a hardware-wrapped key that is
 * given in ephemerally-wrapped form.
 *
 * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is
 * invalid; or another -errno value.
 */
int qcom_ice_derive_sw_secret(struct qcom_ice *ice,
			      const u8 *eph_key, size_t eph_key_size,
			      u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
	int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size,
					    sw_secret,
					    BLK_CRYPTO_SW_SECRET_SIZE);
	if (err == -EIO || err == -EINVAL)
		err = -EBADMSG;		/* probably invalid key */
	return err;
}
EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret);

/**
 * qcom_ice_generate_key() - Generate a wrapped key for inline encryption
 * @ice: ICE driver data
 * @lt_key: output buffer for the long-term wrapped key
 *
 * Use HWKM to generate a new key and return it as a long-term wrapped key.
 *
 * Return: the size of the resulting wrapped key on success; -errno on failure.
 */
int qcom_ice_generate_key(struct qcom_ice *ice,
			  u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	int err;

	err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
	if (err)
		return err;

	return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
}
EXPORT_SYMBOL_GPL(qcom_ice_generate_key);

/**
 * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption
 * @ice: ICE driver data
 * @lt_key: a long-term wrapped key
 * @lt_key_size: size of @lt_key in bytes
 * @eph_key: output buffer for the ephemerally-wrapped key
 *
 * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key.
 *
 * Return: the size of the resulting wrapped key on success; -EBADMSG if the
 * given long-term wrapped key is invalid; or another -errno value.
 */
int qcom_ice_prepare_key(struct qcom_ice *ice,
			 const u8 *lt_key, size_t lt_key_size,
			 u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	int err;

	err = qcom_scm_prepare_ice_key(lt_key, lt_key_size,
				       eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
	if (err == -EIO || err == -EINVAL)
		err = -EBADMSG;		/* probably invalid key */
	if (err)
		return err;

	return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
}
EXPORT_SYMBOL_GPL(qcom_ice_prepare_key);

/**
 * qcom_ice_import_key() - Import a raw key for inline encryption
 * @ice: ICE driver data
 * @raw_key: the raw key to import
 * @raw_key_size: size of @raw_key in bytes
 * @lt_key: output buffer for the long-term wrapped key
 *
 * Use HWKM to import a raw key and return it as a long-term wrapped key.
 *
 * Return: the size of the resulting wrapped key on success; -errno on failure.
 */
int qcom_ice_import_key(struct qcom_ice *ice,
			const u8 *raw_key, size_t raw_key_size,
			u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	int err;

	err = qcom_scm_import_ice_key(raw_key, raw_key_size,
				      lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
	if (err)
		return err;

	return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
}
EXPORT_SYMBOL_GPL(qcom_ice_import_key);

static struct qcom_ice *qcom_ice_create(struct device *dev,
					void __iomem *base)
{
	struct qcom_ice *engine;

	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	if (!qcom_scm_ice_available()) {
		dev_warn(dev, "ICE SCM interface not found\n");
		return NULL;
	}

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return ERR_PTR(-ENOMEM);

	engine->dev = dev;
	engine->base = base;

	/*
	 * The legacy DT binding uses different clk names for each consumer,
	 * so let's try those first.  If none of those are a match, it means
	 * we only have one clock and it is part of the dedicated DT node.
	 * Also, enable the clock before we check what HW version the driver
	 * supports.
	 */
	engine->core_clk = devm_clk_get_optional_enabled(dev, "ice_core_clk");
	if (!engine->core_clk)
		engine->core_clk = devm_clk_get_optional_enabled(dev, "ice");
	if (!engine->core_clk)
		engine->core_clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(engine->core_clk))
		return ERR_CAST(engine->core_clk);

	if (!qcom_ice_check_supported(engine))
		return ERR_PTR(-EOPNOTSUPP);

	dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n");

	return engine;
}

/**
 * of_qcom_ice_get() - get an ICE instance from a DT node
 * @dev: device pointer for the consumer device
 *
 * This function provides an ICE instance either by creating one for the
 * consumer device, if its DT node provides the 'ice' reg range and the 'ice'
 * clock (legacy DT style), or, if the consumer instead points at a dedicated
 * ICE DT node via the 'qcom,ice' phandle property, by returning the ICE
 * instance that was already created for that node.
 *
 * Return: ICE pointer on success, NULL if there is no ICE data provided by the
 * consumer or ERR_PTR() on error.
 */
static struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct qcom_ice *ice;
	struct resource *res;
	void __iomem *base;
	struct device_link *link;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * In order to support legacy style devicetree bindings, we need
	 * to create the ICE instance using the consumer device and the reg
	 * range called 'ice' it provides.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
	if (res) {
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return ERR_CAST(base);

		/* create ICE instance using consumer dev */
		return qcom_ice_create(&pdev->dev, base);
	}

	/*
	 * If the consumer node does not provide an 'ice' reg range
	 * (legacy DT binding), then it must at least provide a phandle
	 * to the ICE devicetree node, otherwise ICE is not supported.
	 */
	struct device_node *node __free(device_node) = of_parse_phandle(dev->of_node,
									"qcom,ice", 0);
	if (!node)
		return NULL;

	pdev = of_find_device_by_node(node);
	if (!pdev) {
		dev_err(dev, "Cannot find device node %s\n", node->name);
		return ERR_PTR(-EPROBE_DEFER);
	}

	ice = platform_get_drvdata(pdev);
	if (!ice) {
		dev_err(dev, "Cannot get ice instance from %s\n",
			dev_name(&pdev->dev));
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
	if (!link) {
		dev_err(&pdev->dev,
			"Failed to create device link to consumer %s\n",
			dev_name(dev));
		platform_device_put(pdev);
		ice = ERR_PTR(-EINVAL);
	}

	return ice;
}

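/*
 * Drop the reference that of_qcom_ice_get() took with
 * of_find_device_by_node().  In the legacy-binding case, where the consumer's
 * own 'ice' reg range was used, no extra reference was taken, hence the check.
 */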
static void qcom_ice_put(const struct qcom_ice *ice)
{
	struct platform_device *pdev = to_platform_device(ice->dev);

	if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
		platform_device_put(pdev);
}

static void devm_of_qcom_ice_put(struct device *dev, void *res)
{
	qcom_ice_put(*(struct qcom_ice **)res);
}

/**
 * devm_of_qcom_ice_get() - Devres-managed helper to get an ICE instance from
 * a DT node.
 * @dev: device pointer for the consumer device.
 *
 * This function provides an ICE instance either by creating one for the
 * consumer device, if its DT node provides the 'ice' reg range and the 'ice'
 * clock (legacy DT style), or, if the consumer instead points at a dedicated
 * ICE DT node via the 'qcom,ice' phandle property, by returning the ICE
 * instance that was already created for that node.
 *
 * Return: ICE pointer on success, NULL if there is no ICE data provided by the
 * consumer or ERR_PTR() on error.
 */
struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
{
	struct qcom_ice *ice, **dr;

	dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	ice = of_qcom_ice_get(dev);
	if (!IS_ERR_OR_NULL(ice)) {
		*dr = ice;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return ice;
}
EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
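
/*
 * Rough consumer-side usage sketch (illustrative only; not lifted from any
 * particular driver, and error handling and keyslot management are elided):
 *
 *	struct qcom_ice *ice = devm_of_qcom_ice_get(dev);
 *
 *	if (IS_ERR(ice))
 *		return PTR_ERR(ice);	// ICE described in DT but unusable
 *	if (ice) {
 *		err = qcom_ice_enable(ice);
 *		// advertise qcom_ice_get_supported_key_type(ice) in the
 *		// disk's crypto capabilities; later, per keyslot:
 *		err = qcom_ice_program_key(ice, slot, blk_key);
 *		...
 *		err = qcom_ice_evict_key(ice, slot);
 *	}
 */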

static int qcom_ice_probe(struct platform_device *pdev)
{
	struct qcom_ice *engine;
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_warn(&pdev->dev, "ICE registers not found\n");
		return PTR_ERR(base);
	}

	engine = qcom_ice_create(&pdev->dev, base);
	if (IS_ERR(engine))
		return PTR_ERR(engine);

	platform_set_drvdata(pdev, engine);

	return 0;
}

static const struct of_device_id qcom_ice_of_match_table[] = {
	{ .compatible = "qcom,inline-crypto-engine" },
	{ },
};
MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table);

static struct platform_driver qcom_ice_driver = {
	.probe	= qcom_ice_probe,
	.driver = {
		.name = "qcom-ice",
		.of_match_table = qcom_ice_of_match_table,
	},
};

module_platform_driver(qcom_ice_driver);

MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver");
MODULE_LICENSE("GPL");