/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

static const struct crypto_type crypto_acomp_type;

#ifdef CONFIG_NET
static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	strncpy(racomp.type, "acomp", sizeof(racomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
		    sizeof(struct crypto_report_acomp), &racomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : acomp\n");
}

static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	alg->exit(acomp);
}

static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
	struct acomp_alg *alg = crypto_acomp_alg(acomp);

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		return crypto_init_scomp_ops_async(tfm);

	acomp->compress = alg->compress;
	acomp->decompress = alg->decompress;
	acomp->dst_free = alg->dst_free;
	acomp->reqsize = alg->reqsize;

	if (alg->exit)
		acomp->base.exit = crypto_acomp_exit_tfm;

	if (alg->init)
		return alg->init(acomp);

	return 0;
}

static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
	int extsize = crypto_alg_extsize(alg);

	if (alg->cra_type != &crypto_acomp_type)
		extsize += sizeof(struct crypto_scomp *);

	return extsize;
}

static const struct crypto_type crypto_acomp_type = {
	.extsize = crypto_acomp_extsize,
	.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_acomp_show,
#endif
	.report = crypto_acomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
	.tfmsize = offsetof(struct crypto_acomp, base),
};

struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);

struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct acomp_req *req;

	req = __acomp_request_alloc(acomp);
	if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
		return crypto_acomp_scomp_alloc_ctx(req);

	return req;
}
EXPORT_SYMBOL_GPL(acomp_request_alloc);

void acomp_request_free(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);

	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
		crypto_acomp_scomp_free_ctx(req);

	if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
		acomp->dst_free(req->dst);
		req->dst = NULL;
	}

	__acomp_request_free(req);
}
EXPORT_SYMBOL_GPL(acomp_request_free);

int crypto_register_acomp(struct acomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_acomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);

int crypto_unregister_acomp(struct acomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);

int crypto_register_acomps(struct acomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_acomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);

void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
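
/*
 * Usage sketch (illustrative only, not part of the file above): one way a
 * caller might drive the acomp API whose allocation and request helpers are
 * exported here.  Assumptions: a "deflate" acomp (or scomp-wrapped)
 * implementation is registered, the source and destination buffers are
 * linearly mapped kernel memory suitable for sg_init_one(), and the callback
 * signature matches this kernel's crypto_completion_t.  The names
 * example_acomp_result, example_acomp_done and example_acomp_compress are
 * hypothetical helpers introduced for this sketch.
 */
#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_acomp_result {
	struct completion completion;
	int err;
};

/* Completion callback: record the result and wake the waiting caller */
static void example_acomp_done(struct crypto_async_request *areq, int err)
{
	struct example_acomp_result *res = areq->data;

	if (err == -EINPROGRESS)	/* backlogged request was started; keep waiting */
		return;
	res->err = err;
	complete(&res->completion);
}

/* Compress slen bytes at src into dst; returns bytes produced or -errno */
static int __maybe_unused example_acomp_compress(const void *src, unsigned int slen,
						 void *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist sg_src, sg_dst;
	struct example_acomp_result res;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);

	init_completion(&res.completion);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_acomp_done, &res);

	/* The operation may complete synchronously or asynchronously */
	ret = crypto_acomp_compress(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	if (!ret)
		ret = req->dlen;	/* bytes of compressed output produced */

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return ret;
}

/*
 * Decompression would follow the same pattern with crypto_acomp_decompress();
 * a caller may also leave req->dst NULL with CRYPTO_ACOMP_ALLOC_OUTPUT-capable
 * implementations, in which case acomp_request_free() above releases the
 * driver-allocated destination via dst_free().
 */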