// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

#include "safexcel.h"

int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_desc_ring *cdr,
				   struct safexcel_desc_ring *rdr)
{
	int i;
	struct safexcel_command_desc *cdesc;
	dma_addr_t atok;

	/* Actual command descriptor ring */
	cdr->offset = priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	cdr->read = cdr->base;

	/* Command descriptor shadow ring for storing additional token data */
	cdr->shoffset = priv->config.cdsh_offset;
	cdr->shbase = dmam_alloc_coherent(priv->dev,
					  cdr->shoffset *
					  EIP197_DEFAULT_RING_SIZE,
					  &cdr->shbase_dma, GFP_KERNEL);
	if (!cdr->shbase)
		return -ENOMEM;
	cdr->shwrite = cdr->shbase;
	cdr->shbase_end = cdr->shbase + cdr->shoffset *
					(EIP197_DEFAULT_RING_SIZE - 1);

	/*
	 * Populate command descriptors with physical pointers to shadow descs.
	 * Note that we only need to do this once if we don't overwrite them.
	 */
	cdesc = cdr->base;
	atok = cdr->shbase_dma;
	for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
		cdesc->atok_lo = lower_32_bits(atok);
		cdesc->atok_hi = upper_32_bits(atok);
		cdesc = (void *)cdesc + cdr->offset;
		atok += cdr->shoffset;
	}

	rdr->offset = priv->config.rd_offset;
	/* Use shoffset for result token offset here */
	rdr->shoffset = priv->config.res_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	rdr->read = rdr->base;

	return 0;
}

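/*
 * Pick the ring to submit the next request to, round-robin over all rings
 * available in the hardware configuration.
 */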
inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}

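/*
 * Return the current command descriptor write position and advance the
 * command descriptor and shadow (token) write pointers in lockstep. On the
 * first segment of a chain, *atoken is pointed at the matching shadow
 * descriptor so the caller can fill in the token data. Returns
 * ERR_PTR(-ENOMEM) when the ring is full.
 */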
static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      bool first,
				      struct safexcel_token **atoken)
{
	void *ptr = ring->write;

	if (first)
		*atoken = ring->shwrite;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end) {
		ring->write = ring->base;
		ring->shwrite = ring->shbase;
	} else {
		ring->write += ring->offset;
		ring->shwrite += ring->shoffset;
	}

	return ptr;
}

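/*
 * Return the current result descriptor write position and advance the write
 * pointer. *rtoken is pointed at the result token, which lives at relative
 * offset shoffset within the descriptor slot. Returns ERR_PTR(-ENOMEM) when
 * the ring is full.
 */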
static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      struct result_data_desc **rtoken)
{
	void *ptr = ring->write;

	/* Result token at relative offset shoffset */
	*rtoken = ring->write + ring->shoffset;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

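/*
 * Return the current read position and advance the read pointer, or
 * ERR_PTR(-ENOENT) when the ring is empty.
 */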
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
			      struct safexcel_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}

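/* Peek at the current result descriptor read position without advancing it. */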
inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
				     int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return rdr->read;
}

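/* Index of the result descriptor currently at the read pointer. */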
inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
					 int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return (rdr->read - rdr->base) / rdr->offset;
}

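/* Index of an arbitrary result descriptor within the result ring. */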
inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
					 int ring,
					 struct safexcel_result_desc *rdesc)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return ((void *)rdesc - rdr->base) / rdr->offset;
}

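/*
 * Undo the last write pointer advance, backing the write and shadow write
 * pointers up by one slot. Typically used to release already-reserved
 * descriptors when building a descriptor chain fails partway.
 */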
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
				 struct safexcel_desc_ring *ring)
{
	if (ring->write == ring->read)
		return;

	if (ring->write == ring->base) {
		ring->write = ring->base_end;
		ring->shwrite = ring->shbase_end;
	} else {
		ring->write -= ring->offset;
		ring->shwrite -= ring->shoffset;
	}
}

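/*
 * Reserve and fill the next command descriptor for one segment of a request.
 * The control data (packet length, options, context pointer) is only written
 * for the first segment of a chain.
 */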
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context,
						 struct safexcel_token **atoken)
{
	struct safexcel_command_desc *cdesc;

	cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
					 first, atoken);
	if (IS_ERR(cdesc))
		return cdesc;

	cdesc->particle_size = data_len;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first) {
		/*
		 * Note that the length here MUST be >0 or else the EIP(1)97
		 * may hang. Newer EIP197 firmware actually incorporates this
		 * fix already, but that doesn't help the EIP97 and we may
		 * also be running older firmware.
		 */
		cdesc->control_data.packet_length = full_data_len ?: 1;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD |
					      EIP197_OPTION_RC_AUTO;
		cdesc->control_data.type = EIP197_TYPE_BCLA;
		cdesc->control_data.context_lo = lower_32_bits(context) |
						 EIP197_CONTEXT_SMALL;
		cdesc->control_data.context_hi = upper_32_bits(context);
	}

	return cdesc;
}

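/*
 * Reserve and fill the next result descriptor for one segment of a request.
 * Overflow and error fields are preset to "error"; the hardware clears them
 * on successful completion.
 */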
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;
	struct result_data_desc *rtoken;

	rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
					 &rtoken);
	if (IS_ERR(rdesc))
		return rdesc;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 1; /* assume error */
	rdesc->buffer_overflow = 1;     /* assume error */
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size = EIP197_RD64_RESULT_SIZE;
	rdesc->rsvd1 = 0;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	/* Clear length in result token */
	rtoken->packet_length = 0;
	/* Assume errors - HW will clear if not the case */
	rtoken->error_code = 0x7fff;

	return rdesc;
}