1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
4 * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
5 * Copyright 2024-2025 NXP
6 */
7
8 #define pr_fmt(fmt) "caam blob_gen: " fmt
9
10 #include <linux/bitfield.h>
11 #include <linux/device.h>
12 #include <keys/trusted-type.h>
13 #include <soc/fsl/caam-blob.h>
14
15 #include "compat.h"
16 #include "desc_constr.h"
17 #include "desc.h"
18 #include "error.h"
19 #include "intern.h"
20 #include "jr.h"
21 #include "regs.h"
22
/*
 * Upper bound on the size of a blob encapsulation/decapsulation job
 * descriptor; used to size the buffer passed to init_job_desc().
 */
#define CAAM_BLOB_DESC_BYTES_MAX \
	/* Command to initialize & stating length of descriptor */ \
	(CAAM_CMD_SZ + \
	/* Command to append the key-modifier + key-modifier data */ \
	 CAAM_CMD_SZ + CAAM_BLOB_KEYMOD_LENGTH + \
	/* Command to include input key + pointer to the input key */ \
	 CAAM_CMD_SZ + CAAM_PTR_SZ_MAX + \
	/* Command to include output key + pointer to the output key */ \
	 CAAM_CMD_SZ + CAAM_PTR_SZ_MAX + \
	/* Command describing the operation to perform */ \
	 CAAM_CMD_SZ)
34
/*
 * struct caam_blob_priv - per-user blob generation state
 * @jrdev: job ring device used to submit blob jobs. The structure is
 *         recovered from the job ring device via container_of() in
 *         caam_blob_gen_init().
 */
struct caam_blob_priv {
	struct device jrdev;
};
38
/*
 * struct caam_blob_job_result - completion context for one blob job
 * @err:        0 on success, otherwise the CAAM status translated by
 *              caam_jr_strstatus() in the job-done callback
 * @completion: signalled by caam_blob_job_done() once the job finished
 */
struct caam_blob_job_result {
	int err;
	struct completion completion;
};
43
/*
 * Job ring callback for blob jobs: record the job status and wake the
 * submitter blocked in caam_process_blob().
 */
static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct caam_blob_job_result *result = context;

	dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	/* Translate a non-zero CAAM status word into a Linux errno. */
	result->err = err ? caam_jr_strstatus(dev, err) : 0;

	complete(&result->completion);
}
63
check_caam_state(struct device * jrdev)64 static u32 check_caam_state(struct device *jrdev)
65 {
66 const struct caam_drv_private *ctrlpriv;
67
68 ctrlpriv = dev_get_drvdata(jrdev->parent);
69 return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
70 }
71
/**
 * caam_process_blob() - encapsulate or decapsulate a CAAM blob
 * @priv:  instance returned by caam_blob_gen_init()
 * @info:  describes the job: input/output buffers and lengths, the key
 *         modifier, and optional protected-key parameters
 * @encap: true to encapsulate @info->input into a blob, false to
 *         decapsulate a blob back into key material
 *
 * Builds a blob protocol job descriptor, submits it to the job ring and
 * waits for completion. On success, @info->output_len is set to the
 * number of bytes written to @info->output.
 *
 * Return: 0 on success, -EINVAL on invalid lengths, -ENOMEM on
 *         allocation or DMA-mapping failure, or the errno derived from
 *         the CAAM job status.
 */
int caam_process_blob(struct caam_blob_priv *priv,
		      struct caam_blob_info *info, bool encap)
{
	struct caam_blob_job_result testres;
	struct device *jrdev = &priv->jrdev;
	dma_addr_t dma_in, dma_out;
	int op = OP_PCLID_BLOB;
	int hwbk_caam_ovhd = 0;
	size_t output_len;
	u32 *desc;
	u32 moo;
	int ret;
	int len;

	if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH)
		return -EINVAL;

	/*
	 * A blob carries CAAM_BLOB_OVERHEAD bytes of metadata, so a blob to
	 * be decapsulated must be at least that long. Without this check the
	 * subtraction below would wrap around (output_len is unsigned) and a
	 * huge length would be handed to the DMA mapping and the descriptor.
	 */
	if (!encap && info->input_len < CAAM_BLOB_OVERHEAD)
		return -EINVAL;

	if (encap) {
		op |= OP_TYPE_ENCAP_PROTOCOL;
		output_len = info->input_len + CAAM_BLOB_OVERHEAD;
	} else {
		op |= OP_TYPE_DECAP_PROTOCOL;
		output_len = info->input_len - CAAM_BLOB_OVERHEAD;
		/*
		 * Note: info->output_len is deliberately NOT set here; it is
		 * updated only once the job has actually succeeded, so a
		 * failed job cannot leave a misleading length behind.
		 */
	}

	if (encap && info->pkey_info.is_pkey) {
		/* Encapsulate a black (CAAM-covered) key rather than plaintext. */
		op |= OP_PCL_BLOB_BLACK;
		if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
			op |= OP_PCL_BLOB_EKT;
			hwbk_caam_ovhd = CAAM_CCM_OVERHEAD;
		}
		/* The covered key (input + CCM overhead) must still fit. */
		if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE)
			return -EINVAL;

		len = info->input_len + hwbk_caam_ovhd;
	} else {
		len = info->input_len;
	}

	desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/*
	 * For the black-key encap path the covered key is written back into
	 * the input buffer by the FIFO STORE below, hence BIDIRECTIONAL.
	 */
	dma_in = dma_map_single(jrdev, info->input, len,
				encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_in)) {
		dev_err(jrdev, "unable to map input DMA buffer\n");
		ret = -ENOMEM;
		goto out_free;
	}

	dma_out = dma_map_single(jrdev, info->output, output_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_out)) {
		dev_err(jrdev, "unable to map output DMA buffer\n");
		ret = -ENOMEM;
		goto out_unmap_in;
	}

	moo = check_caam_state(jrdev);
	if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
		dev_warn(jrdev,
			 "using insecure test key, enable HAB to use unique device key!\n");

	/*
	 * A data blob is encrypted using a blob key (BK); a random number.
	 * The BK is used as an AES-CCM key. The initial block (B0) and the
	 * initial counter (Ctr0) are generated automatically and stored in
	 * Class 1 Context DWords 0+1+2+3. The random BK is stored in the
	 * Class 1 Key Register. Operation Mode is set to AES-CCM.
	 */
	init_job_desc(desc, 0);

	if (encap && info->pkey_info.is_pkey) {
		/*!1. key command used to load class 1 key register
		 * from input plain key.
		 */
		append_key(desc, dma_in, info->input_len,
			   CLASS_1 | KEY_DEST_CLASS_REG);
		/*!2. Fifostore to store protected key from class 1 key register. */
		if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
			append_fifo_store(desc, dma_in, info->input_len,
					  LDST_CLASS_1_CCB |
					  FIFOST_TYPE_KEY_CCM_JKEK);
		} else {
			append_fifo_store(desc, dma_in, info->input_len,
					  LDST_CLASS_1_CCB |
					  FIFOST_TYPE_KEY_KEK);
		}
		/*
		 * JUMP_OFFSET specifies the offset of the JUMP target from
		 * the JUMP command's address in the descriptor buffer.
		 */
		append_jump(desc, JUMP_COND_NOP | BIT(0) << JUMP_OFFSET_SHIFT);
	}

	/*!3. Load class 2 key with key modifier. */
	append_key_as_imm(desc, info->key_mod, info->key_mod_len,
			  info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);

	/*!4. SEQ IN PTR Command. */
	append_seq_in_ptr(desc, dma_in, info->input_len, 0);

	/*!5. SEQ OUT PTR Command. */
	append_seq_out_ptr(desc, dma_out, output_len, 0);

	/*!6. Blob encapsulation/decapsulation PROTOCOL Command. */
	append_operation(desc, op);

	print_hex_dump_debug("data@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 1, info->input,
			     len, false);
	print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 1, desc,
			     desc_bytes(desc), false);

	testres.err = 0;
	init_completion(&testres.completion);

	ret = caam_jr_enqueue(jrdev, desc, caam_blob_job_done, &testres);
	if (ret == -EINPROGRESS) {
		/* Job accepted; block until the callback signals completion. */
		wait_for_completion(&testres.completion);
		ret = testres.err;
		print_hex_dump_debug("output@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 1, info->output,
				     output_len, false);
	}

	/* Report the produced length only when the job actually succeeded. */
	if (ret == 0)
		info->output_len = output_len;

	dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE);
out_unmap_in:
	dma_unmap_single(jrdev, dma_in, len,
			 encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
out_free:
	kfree(desc);
	return ret;
}
EXPORT_SYMBOL(caam_process_blob);
213
caam_blob_gen_init(void)214 struct caam_blob_priv *caam_blob_gen_init(void)
215 {
216 struct caam_drv_private *ctrlpriv;
217 struct device *jrdev;
218
219 /*
220 * caam_blob_gen_init() may expectedly fail with -ENODEV, e.g. when
221 * CAAM driver didn't probe or when SoC lacks BLOB support. An
222 * error would be harsh in this case, so we stick to info level.
223 */
224
225 jrdev = caam_jr_alloc();
226 if (IS_ERR(jrdev)) {
227 pr_info("job ring requested, but none currently available\n");
228 return ERR_PTR(-ENODEV);
229 }
230
231 ctrlpriv = dev_get_drvdata(jrdev->parent);
232 if (!ctrlpriv->blob_present) {
233 dev_info(jrdev, "no hardware blob generation support\n");
234 caam_jr_free(jrdev);
235 return ERR_PTR(-ENODEV);
236 }
237
238 return container_of(jrdev, struct caam_blob_priv, jrdev);
239 }
240 EXPORT_SYMBOL(caam_blob_gen_init);
241
/**
 * caam_blob_gen_exit() - free blob generation resources
 * @priv: instance returned by caam_blob_gen_init()
 *
 * Releases the job ring device held by @priv.
 */
void caam_blob_gen_exit(struct caam_blob_priv *priv)
{
	caam_jr_free(&priv->jrdev);
}
EXPORT_SYMBOL(caam_blob_gen_exit);
247