// SPDX-License-Identifier: GPL-2.0-only
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t sharing;
	unsigned long qhandle;

	spinlock_t lock;
	u8 q_type;
	void *q;
	unsigned long head;
	unsigned long tail;
	struct list_head jobs;

	unsigned long devino;

	char irq_name[32];
	unsigned int irq;

	struct list_head list;
};

struct spu_qreg {
	struct spu_queue *queue;
	unsigned long type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head entry;
	unsigned int offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; job_finished() tests whether that offset
 * lies in the half-open range (old_head, new_head], accounting for
 * wraparound of the queue.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
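
/* Worked example (illustrative, added for clarity): suppose the queue
 * has wrapped, with old_head == 0x380 and new_head == 0x080.  A job
 * whose recorded offset is 0x3c0 satisfies "offset > old_head", and a
 * job at offset 0x040 satisfies "offset <= new_head", so both are
 * reported finished; a job at offset 0x200 falls in neither half and
 * is still pending.
 */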

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
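
/* Worked example (illustrative, assuming 64-byte entries and a
 * 64-entry queue, so end == 0x1000): head == 0x100 and tail == 0x180
 * gives diff == (0x1000 - 0x180) + 0x100 == 0xf80, i.e. 62 entry
 * slots, of which 61 are reported free after reserving the one-slot
 * gap that keeps a full queue distinguishable from an empty one.
 */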

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
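
/* Note (added for clarity): only the first descriptor of a chain gets
 * a full control word built here.  Continuation descriptors, as built
 * in n2_do_async_digest() and __n2_crypt_chunk(), carry just the
 * (length - 1) field, and CONTROL_END_OF_BLOCK is OR'd into whichever
 * descriptor ends up last.
 */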

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head entry;
	const u8 *hash_zero;
	const u8 *hash_init;
	u8 hw_op_hashsz;
	u8 digest_size;
	u8 auth_type;
	u8 hmac_type;
	struct ahash_alg alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char *child_alg;
	struct n2_ahash_alg derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash *fallback_tfm;
};

#define N2_HASH_KEY_MAX		32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx base;

	struct crypto_shash *child_shash;

	int hash_key_len;
	unsigned char hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state md5;
		struct sha1_state sha1;
		struct sha256_state sha256;
	} u;

	struct ahash_request fallback_req;
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warn("Child shash '%s' could not be loaded!\n",
			n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_tfm_digest(child_shash, key, keylen,
					      ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}
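
/* Note (added for clarity): per the HMAC construction, a key longer
 * than the child hash's block size is first digested down to
 * digest-size bytes, which is what the crypto_shash_tfm_digest() call
 * above does.  A key that fits neither case leaves hash_key_len larger
 * than N2_HASH_KEY_MAX, and such requests are routed to the software
 * fallback in n2_hmac_async_digest().
 */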

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}
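
/* Note (added for clarity): submission here is fully synchronous.
 * wait_for_tail() spins until the hardware head pointer catches up
 * with our tail, so by the time submit_and_wait_for_tail() returns
 * the SPU has consumed every queued descriptor.
 */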

static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}

struct n2_skcipher_context {
	int key_len;
	int enc_type;
	union {
		u8 aes[AES_MAX_KEY_SIZE];
		u8 des[DES_KEY_SIZE];
		u8 des3[3 * DES_KEY_SIZE];
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head entry;
	unsigned long iv_paddr : 44;
	unsigned long arr_len : 20;
	unsigned long dest_paddr;
	unsigned long dest_final;
	struct {
		unsigned long src_paddr : 44;
		unsigned long src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct skcipher_walk walk;
	struct list_head chunk_list;
	struct n2_crypto_chunk chunk;
	u8 temp_iv[16];
};
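
/* Note (added for clarity): temp_iv exists for CBC decryption.  The IV
 * that must be handed back to the caller is the last ciphertext block,
 * and an in-place decrypt overwrites it, so n2_do_chaining() snapshots
 * it into temp_iv before submitting the work.
 */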

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */

struct n2_skcipher_alg {
	struct list_head entry;
	u8 enc_type;
	struct skcipher_alg skcipher;
};

static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	return container_of(alg, struct n2_skcipher_alg, skcipher);
}

static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des3_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
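
/* Worked example (illustrative, added for clarity): for nbytes ==
 * 0x1000e with a 16-byte block size, the partial-block remainder 0xe
 * is trimmed, leaving 0x10000, which is exactly the 2^16 hardware
 * maximum; anything larger would be clamped to that.  The leftover 0xe
 * bytes remain in the walk for a later descriptor.
 */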

static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
			    struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct skcipher_request *req)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct skcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	err = skcipher_walk_async(walk, req);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.phys.page) +
			     walk->src.phys.offset);
		dest_paddr = (page_to_phys(walk->dst.phys.page) +
			      walk->dst.phys.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = skcipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = skcipher_walk_done(walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
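
/* To summarize the loop above (comment added for clarity): a new chunk
 * is started whenever the in-place property flips, an out-of-place
 * destination stops being physically contiguous, the scatter array
 * fills up (N2_CHUNK_ARR_LEN entries), or the running total would
 * exceed the 2^16-byte limit of a single control word.
 */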

static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, false);
}

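/* Note (added for clarity): for chained modes the IV of each chunk
 * depends on its neighbours.  On encryption the chunks are processed
 * in order and each chunk's IV is the final ciphertext block of the
 * previous one (dest_final).  On decryption the list is walked in
 * reverse so that every chunk's ciphertext is still intact when it is
 * needed as the preceding chunk's IV; the very last ciphertext block
 * is copied into temp_iv up front, since an in-place decrypt would
 * destroy it.
 */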
static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, false);
}

struct n2_skcipher_tmpl {
	const char *name;
	const char *drv_name;
	u8 block_size;
	u8 enc_type;
	struct skcipher_alg skcipher;
};

static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
	/* DES: ECB and CBC are supported */
	{ .name = "ecb(des)",
	  .drv_name = "ecb-des",
	  .block_size = DES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_DES |
		       ENC_TYPE_CHAINING_ECB),
	  .skcipher = {
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = n2_des_setkey,
		.encrypt = n2_encrypt_ecb,
		.decrypt = n2_decrypt_ecb,
	  },
	},
	{ .name = "cbc(des)",
	  .drv_name = "cbc-des",
	  .block_size = DES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_DES |
		       ENC_TYPE_CHAINING_CBC),
	  .skcipher = {
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = n2_des_setkey,
		.encrypt = n2_encrypt_chaining,
		.decrypt = n2_decrypt_chaining,
	  },
	},

	/* 3DES: ECB and CBC are supported */
	{ .name = "ecb(des3_ede)",
	  .drv_name = "ecb-3des",
	  .block_size = DES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_3DES |
		       ENC_TYPE_CHAINING_ECB),
	  .skcipher = {
		.min_keysize = 3 * DES_KEY_SIZE,
		.max_keysize = 3 * DES_KEY_SIZE,
		.setkey = n2_3des_setkey,
		.encrypt = n2_encrypt_ecb,
		.decrypt = n2_decrypt_ecb,
	  },
	},
	{ .name = "cbc(des3_ede)",
	  .drv_name = "cbc-3des",
	  .block_size = DES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_3DES |
		       ENC_TYPE_CHAINING_CBC),
	  .skcipher = {
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = 3 * DES_KEY_SIZE,
		.max_keysize = 3 * DES_KEY_SIZE,
		.setkey = n2_3des_setkey,
		.encrypt = n2_encrypt_chaining,
		.decrypt = n2_decrypt_chaining,
	  },
	},

	/* AES: ECB, CBC and CTR are supported */
	{ .name = "ecb(aes)",
	  .drv_name = "ecb-aes",
	  .block_size = AES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_AES128 |
		       ENC_TYPE_CHAINING_ECB),
	  .skcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = n2_aes_setkey,
		.encrypt = n2_encrypt_ecb,
		.decrypt = n2_decrypt_ecb,
	  },
	},
	{ .name = "cbc(aes)",
	  .drv_name = "cbc-aes",
	  .block_size = AES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_AES128 |
		       ENC_TYPE_CHAINING_CBC),
	  .skcipher = {
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = n2_aes_setkey,
		.encrypt = n2_encrypt_chaining,
		.decrypt = n2_decrypt_chaining,
	  },
	},
	{ .name = "ctr(aes)",
	  .drv_name = "ctr-aes",
	  .block_size = AES_BLOCK_SIZE,
	  .enc_type = (ENC_TYPE_ALG_AES128 |
		       ENC_TYPE_CHAINING_COUNTER),
	  .skcipher = {
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = n2_aes_setkey,
		.encrypt = n2_encrypt_chaining,
		.decrypt = n2_encrypt_chaining,
	  },
	},

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)

static LIST_HEAD(skcipher_algs);

struct n2_hash_tmpl {
	const char *name;
	const u8 *hash_zero;
	const u8 *hash_init;
	u8 hw_op_hashsz;
	u8 digest_size;
	u8 statesize;
	u8 block_size;
	u8 auth_type;
	u8 hmac_type;
};

static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name = "md5",
	  .hash_zero = md5_zero_message_hash,
	  .hash_init = (u8 *)n2_md5_init,
	  .auth_type = AUTH_TYPE_MD5,
	  .hmac_type = AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz = MD5_DIGEST_SIZE,
	  .digest_size = MD5_DIGEST_SIZE,
	  .statesize = sizeof(struct md5_state),
	  .block_size = MD5_HMAC_BLOCK_SIZE },
	{ .name = "sha1",
	  .hash_zero = sha1_zero_message_hash,
	  .hash_init = (u8 *)n2_sha1_init,
	  .auth_type = AUTH_TYPE_SHA1,
	  .hmac_type = AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz = SHA1_DIGEST_SIZE,
	  .digest_size = SHA1_DIGEST_SIZE,
	  .statesize = sizeof(struct sha1_state),
	  .block_size = SHA1_BLOCK_SIZE },
	{ .name = "sha256",
	  .hash_zero = sha256_zero_message_hash,
	  .hash_init = (u8 *)n2_sha256_init,
	  .auth_type = AUTH_TYPE_SHA256,
	  .hmac_type = AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz = SHA256_DIGEST_SIZE,
	  .digest_size = SHA256_DIGEST_SIZE,
	  .statesize = sizeof(struct sha256_state),
	  .block_size = SHA256_BLOCK_SIZE },
	{ .name = "sha224",
	  .hash_zero = sha224_zero_message_hash,
	  .hash_init = (u8 *)n2_sha224_init,
	  .auth_type = AUTH_TYPE_SHA256,
	  .hmac_type = AUTH_TYPE_RESERVED,
	  .hw_op_hashsz = SHA256_DIGEST_SIZE,
	  .digest_size = SHA224_DIGEST_SIZE,
	  .statesize = sizeof(struct sha256_state),
	  .block_size = SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
	struct n2_skcipher_alg *skcipher, *skcipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&skcipher->skcipher);
		list_del(&skcipher->entry);
		kfree(skcipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
	return 0;
}

static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
	struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct skcipher_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->skcipher;
	*alg = tmpl->skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->base.cra_priority = N2_CRA_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY;
	alg->base.cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
	alg->base.cra_module = THIS_MODULE;
	alg->init = n2_skcipher_init_tfm;

	list_add(&p->entry, &skcipher_algs);
	err = crypto_register_skcipher(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->base.cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->base.cra_name);
	}
	return err;
}

static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	err = -EINVAL;
	if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
		     p->child_alg) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_p;
	if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
		     p->child_alg) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_p;

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
out_free_p:
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}

static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;
	ahash->export = n2_hash_async_noexport;
	ahash->import = n2_hash_async_noimport;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;
	halg->statesize = tmpl->statesize;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}

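/* Usage sketch (illustrative, not part of this driver): once these
 * algorithms are registered, a kernel crypto API consumer that does
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 * is routed to the "sha1-n2" implementation whenever its
 * N2_CRA_PRIORITY of 200 beats the other registered sha1 providers.
 */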
static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}

static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}

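/* Note (added for clarity): the sun4v_ncs_qconf() hypervisor call has
 * to be issued from a CPU that actually shares the SPU being
 * configured, which is why the registration below is bounced via
 * work_on_cpu_safe() to one of the CPUs in the queue's sharing mask.
 */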
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
	struct spu_qreg qr = { .queue = p, .type = q_type };

	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
				dev->dev.of_node);
			return -EINVAL;
		}
		cpumask_set_cpu(*id, &p->sharing);
		table[*id] = p;
	}
	return 0;
}

/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	cpumask_clear(&p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		printk("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	u64 node, reg;

	if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static void n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);
}

static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}

static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static void n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);
}

static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.of_match_table = n2_crypto_match,
	},
	.probe = n2_crypto_probe,
	.remove = n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.of_match_table = n2_mau_match,
	},
	.probe = n2_mau_probe,
	.remove = n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);