/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int security_strength; /* security strength in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

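/* Indexed by enum blk_crypto_mode_num; defined in blk-crypto.c */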
extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

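/*
 * The merge helpers below all reduce to bio_crypt_ctx_mergeable(): two crypt
 * contexts can be merged only if both are absent, or both use the same key
 * and the second context's DUN continues the first's, i.e. it equals the
 * first context's DUN advanced by the first context's byte count.
 */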
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return rq->crypt_keyslot;
}

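/*
 * Note: on success, blk_crypto_get_keyslot() may store NULL in *slot_ptr
 * when the device doesn't require a keyslot for the key, so callers must
 * tolerate a NULL slot.
 */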
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
		     void __user *argp);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

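/*
 * Stubs for !CONFIG_BLK_INLINE_ENCRYPTION: no request ever carries a crypt
 * context, so the compatibility and merge checks trivially succeed.
 */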
static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return false;
}

static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
				   void __user *argp)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

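/*
 * The inline wrappers below keep the common unencrypted case cheap: they
 * check bio_has_crypt_ctx() / blk_crypto_rq_is_encrypted() inline and only
 * call the out-of-line __-prefixed functions when a crypt context is present.
 */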
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

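/*
 * After a front merge the request starts at the merged bio's data unit, so
 * the request's crypt context must take over the bio's starting DUN.
 */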
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

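/*
 * If blk_crypto_bio_prep() returns false, the bio can't proceed as-is;
 * bi_status has already been set to an error (as the fallback stub below
 * illustrates), so the caller must not submit it.
 */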
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
	if (blk_crypto_rq_has_keyslot(rq))
		__blk_crypto_rq_put_keyslot(rq);
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

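/*
 * Illustrative only (hypothetical caller, not defined in this header): a
 * caller building a request with a non-blocking allocation must handle the
 * -ENOMEM case, e.g.:
 *
 *	if (blk_crypto_rq_bio_prep(rq, bio, GFP_NOWAIT) != 0)
 *		return BLK_STS_RESOURCE;
 */
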
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */