/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int security_strength; /* security strength in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

/* Can @bio be merged at the end of @req, as far as crypto is concerned? */
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

/* Can @bio be merged at the front of @req, as far as crypto is concerned? */
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

/* Can @next be merged onto the end of @req, as far as crypto is concerned? */
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return rq->crypt_keyslot;
}

blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
		     void __user *argp);

/* Is @bio's crypto configuration natively supported by its block device? */
static inline bool blk_crypto_supported(struct bio *bio)
{
	return blk_crypto_config_supported_natively(bio->bi_bdev,
			&bio->bi_crypt_context->bc_key->crypto_cfg);
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }
static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return false;
}

static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
				   void __user *argp)
{
	return -ENOTTY;
}

static inline bool blk_crypto_supported(struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

/*
 * After a front merge, the request must take on the DUN of the bio that is
 * now at its front.
 */
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
	if (blk_crypto_rq_has_keyslot(rq))
		__blk_crypto_rq_put_keyslot(rq);
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

bool blk_crypto_fallback_bio_prep(struct bio *bio);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */