/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);
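
/*
 * Example (illustrative sketch only, not called from this file): a typical
 * caller pairs fscrypt_get_ctx() below with fscrypt_release_ctx() around a
 * crypto operation, where "inode" is any inode whose key has already been
 * set up (i_crypt_info != NULL):
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... use ctx, e.g. as the per-bio read completion context ...
 *	fscrypt_release_ctx(ctx);
 */
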
/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 * @gfp_flags:   The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

/**
 * fscrypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void fscrypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fscrypt_complete, &ecr);

	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags:      The gfp flag for memory allocation
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page,
					gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
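
/*
 * Example (sketch, not part of this file): on the write path a filesystem
 * submits the returned bounce page for I/O and then releases it together
 * with its encryption context.  write_one_block() is a hypothetical I/O
 * helper standing in for the filesystem's own writeback code:
 *
 *	struct page *ciphertext_page;
 *	int err;
 *
 *	ciphertext_page = fscrypt_encrypt_page(inode, page, GFP_NOFS);
 *	if (IS_ERR(ciphertext_page))
 *		return PTR_ERR(ciphertext_page);
 *	err = write_one_block(ciphertext_page);
 *	fscrypt_restore_control_page(ciphertext_page);
 *	return err;
 */
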
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
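
/*
 * Example (sketch, not part of this file): in a read completion path, each
 * page is decrypted in place before it is marked up to date; for bio-based
 * reads, fscrypt_decrypt_bio_pages() later in this file performs exactly
 * this loop on a workqueue:
 *
 *	if (fscrypt_decrypt_page(page)) {
 *		SetPageError(page);
 *	} else {
 *		SetPageUptodate(page);
 *	}
 *	unlock_page(page);
 */
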
int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
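
/*
 * Example (sketch, not part of this file): a filesystem installs these
 * dentry operations on dentries it looks up inside an encrypted directory,
 * typically from its ->lookup() method:
 *
 *	if (dir->i_sb->s_cop->is_encrypted(dir))
 *		d_set_d_op(dentry, &fscrypt_d_ops);
 */
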
/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");
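
/*
 * Example (sketch, not part of this file): a filesystem calls
 * fscrypt_initialize() above before its first per-file crypto operation,
 * typically while setting up an inode's encryption key:
 *
 *	res = fscrypt_initialize();
 *	if (res)
 *		return res;
 *	... then allocate and populate the inode's fscrypt_info ...
 */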