/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General netfs cache on cache files internal defs
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "CacheFiles: " fmt


#include <linux/fscache-cache.h>
#include <linux/cred.h>
#include <linux/security.h>
#include <linux/xarray.h>
#include <linux/cachefiles.h>

#define CACHEFILES_DIO_BLOCK_SIZE 4096

struct cachefiles_cache;
struct cachefiles_object;

enum cachefiles_content {
	/* These values are saved on disk */
	CACHEFILES_CONTENT_NO_DATA	= 0, /* No content stored */
	CACHEFILES_CONTENT_SINGLE	= 1, /* Content is monolithic, all is present */
	CACHEFILES_CONTENT_ALL		= 2, /* Content is all present, no map */
	CACHEFILES_CONTENT_BACKFS_MAP	= 3, /* Content is piecemeal, mapped through backing fs */
	CACHEFILES_CONTENT_DIRTY	= 4, /* Content is dirty (only seen on disk) */
	nr__cachefiles_content
};

/*
 * Cached volume representation.
 */
struct cachefiles_volume {
	struct cachefiles_cache		*cache;
	struct list_head		cache_link;	/* Link in cache->volumes */
	struct fscache_volume		*vcookie;	/* The netfs's representation */
	struct dentry			*dentry;	/* The volume dentry */
	struct dentry			*fanout[256];	/* Fanout subdirs */
};

enum cachefiles_object_state {
	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE,	/* Anonymous fd closed by daemon or initial state */
	CACHEFILES_ONDEMAND_OBJSTATE_OPEN,	/* Anonymous fd associated with object is available */
	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING,	/* Object that was closed and is being reopened. */
};

struct cachefiles_ondemand_info {
	struct work_struct		ondemand_work;
	int				ondemand_id;
	enum cachefiles_object_state	state;
	struct cachefiles_object	*object;
	spinlock_t			lock;
};
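
/*
 * Illustrative sketch, inferred from the comments on enum
 * cachefiles_object_state above rather than a definitive protocol, of how
 * the on-demand object state is expected to move:
 *
 *	CLOSE ---(daemon supplies an anonymous fd / copen)---> OPEN
 *	OPEN  ---(daemon closes the anonymous fd)------------> CLOSE
 *	CLOSE ---(a closed object is being reopened)---------> REOPENING ---> OPEN
 */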

/*
 * Backing file state.
 */
struct cachefiles_object {
	struct fscache_cookie		*cookie;	/* Netfs data storage object cookie */
	struct cachefiles_volume	*volume;	/* Cache volume that holds this object */
	struct list_head		cache_link;	/* Link in cache->*_list */
	struct file			*file;		/* The file representing this object */
	char				*d_name;	/* Backing file name */
	int				debug_id;
	spinlock_t			lock;
	refcount_t			ref;
	u8				d_name_len;	/* Length of filename */
	enum cachefiles_content		content_info:8;	/* Info about content presence */
	unsigned long			flags;
#define CACHEFILES_OBJECT_USING_TMPFILE	0		/* Have an unlinked tmpfile */
#ifdef CONFIG_CACHEFILES_ONDEMAND
	struct cachefiles_ondemand_info	*ondemand;
#endif
};

#define CACHEFILES_ONDEMAND_ID_CLOSED	-1

/*
 * Cache files cache definition
 */
struct cachefiles_cache {
	struct fscache_cache		*cache;		/* Cache cookie */
	struct vfsmount			*mnt;		/* mountpoint holding the cache */
	struct dentry			*store;		/* Directory into which live objects go */
	struct dentry			*graveyard;	/* directory into which dead objects go */
	struct file			*cachefilesd;	/* manager daemon handle */
	struct list_head		volumes;	/* List of volume objects */
	struct list_head		object_list;	/* List of active objects */
	spinlock_t			object_list_lock; /* Lock for volumes and object_list */
	const struct cred		*cache_cred;	/* security override for accessing cache */
	struct mutex			daemon_mutex;	/* command serialisation mutex */
	wait_queue_head_t		daemon_pollwq;	/* poll waitqueue for daemon */
	atomic_t			gravecounter;	/* graveyard uniquifier */
	atomic_t			f_released;	/* number of objects released lately */
	atomic_long_t			b_released;	/* number of blocks released lately */
	atomic_long_t			b_writing;	/* Number of blocks being written */
	unsigned			frun_percent;	/* when to stop culling (% files) */
	unsigned			fcull_percent;	/* when to start culling (% files) */
	unsigned			fstop_percent;	/* when to stop allocating (% files) */
	unsigned			brun_percent;	/* when to stop culling (% blocks) */
	unsigned			bcull_percent;	/* when to start culling (% blocks) */
	unsigned			bstop_percent;	/* when to stop allocating (% blocks) */
	unsigned			bsize;		/* cache's block size */
	unsigned			bshift;		/* ilog2(bsize) */
	uint64_t			frun;		/* when to stop culling */
	uint64_t			fcull;		/* when to start culling */
	uint64_t			fstop;		/* when to stop allocating */
	sector_t			brun;		/* when to stop culling */
	sector_t			bcull;		/* when to start culling */
	sector_t			bstop;		/* when to stop allocating */
	unsigned long			flags;
#define CACHEFILES_READY		0	/* T if cache prepared */
#define CACHEFILES_DEAD			1	/* T if cache dead */
#define CACHEFILES_CULLING		2	/* T if cull engaged */
#define CACHEFILES_STATE_CHANGED	3	/* T if state changed (poll trigger) */
#define CACHEFILES_ONDEMAND_MODE	4	/* T if in on-demand read mode */
	char				*rootdirname;	/* name of cache root directory */
	char				*secctx;	/* LSM security context */
	char				*tag;		/* cache binding tag */
	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
	struct xarray			reqs;		/* xarray of pending on-demand requests */
	unsigned long			req_id_next;
	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
	u32				ondemand_id_next;
};

static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
{
	return IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND) &&
		test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
}
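
/*
 * For orientation only: the *_percent limits in struct cachefiles_cache
 * above are normally set by the cache daemon from its config file.  The
 * path, tag and values below are just an example, not mandated by this
 * header:
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	frun 10%
 *	fcull 7%
 *	fstop 3%
 *
 * i.e. culling stops when free space rises above the "run" limit, starts
 * when it falls below the "cull" limit, and allocation stops below the
 * "stop" limit.
 */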

struct cachefiles_req {
	struct cachefiles_object *object;
	struct completion done;
	refcount_t ref;
	int error;
	struct cachefiles_msg msg;
};

#define CACHEFILES_REQ_NEW	XA_MARK_1

#include <trace/events/cachefiles.h>

static inline
struct file *cachefiles_cres_file(struct netfs_cache_resources *cres)
{
	return cres->cache_priv2;
}

static inline
struct cachefiles_object *cachefiles_cres_object(struct netfs_cache_resources *cres)
{
	return fscache_cres_cookie(cres)->cache_priv;
}

/*
 * note change of state for daemon
 */
static inline void cachefiles_state_changed(struct cachefiles_cache *cache)
{
	set_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
	wake_up_all(&cache->daemon_pollwq);
}

/*
 * cache.c
 */
extern int cachefiles_add_cache(struct cachefiles_cache *cache);
extern void cachefiles_withdraw_cache(struct cachefiles_cache *cache);

enum cachefiles_has_space_for {
	cachefiles_has_space_check,
	cachefiles_has_space_for_write,
	cachefiles_has_space_for_create,
};
extern int cachefiles_has_space(struct cachefiles_cache *cache,
				unsigned fnr, unsigned bnr,
				enum cachefiles_has_space_for reason);

/*
 * daemon.c
 */
extern const struct file_operations cachefiles_daemon_fops;
extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);

/*
 * error_inject.c
 */
#ifdef CONFIG_CACHEFILES_ERROR_INJECTION
extern unsigned int cachefiles_error_injection_state;
extern int cachefiles_register_error_injection(void);
extern void cachefiles_unregister_error_injection(void);

#else
#define cachefiles_error_injection_state 0

static inline int cachefiles_register_error_injection(void)
{
	return 0;
}

static inline void cachefiles_unregister_error_injection(void)
{
}
#endif
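
/*
 * Decode of cachefiles_error_injection_state as used by the helpers below
 * (the bit meanings are taken straight from those helpers; how the value is
 * actually set when CONFIG_CACHEFILES_ERROR_INJECTION is enabled is an
 * implementation detail outside this header):
 *
 *	bit 0 - fail writes with -ENOSPC
 *	bit 1 - fail reads, writes and removals with -EIO
 */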

static inline int cachefiles_inject_read_error(void)
{
	return cachefiles_error_injection_state & 2 ? -EIO : 0;
}

static inline int cachefiles_inject_write_error(void)
{
	return cachefiles_error_injection_state & 2 ? -EIO :
		cachefiles_error_injection_state & 1 ? -ENOSPC :
		0;
}

static inline int cachefiles_inject_remove_error(void)
{
	return cachefiles_error_injection_state & 2 ? -EIO : 0;
}

/*
 * interface.c
 */
extern const struct fscache_cache_ops cachefiles_cache_ops;
extern void cachefiles_see_object(struct cachefiles_object *object,
				  enum cachefiles_obj_ref_trace why);
extern struct cachefiles_object *cachefiles_grab_object(struct cachefiles_object *object,
							enum cachefiles_obj_ref_trace why);
extern void cachefiles_put_object(struct cachefiles_object *object,
				  enum cachefiles_obj_ref_trace why);

/*
 * io.c
 */
extern bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
				       enum fscache_want_state want_state);
extern int __cachefiles_prepare_write(struct cachefiles_object *object,
				      struct file *file,
				      loff_t *_start, size_t *_len, size_t upper_len,
				      bool no_space_allocated_yet);
extern int __cachefiles_write(struct cachefiles_object *object,
			      struct file *file,
			      loff_t start_pos,
			      struct iov_iter *iter,
			      netfs_io_terminated_t term_func,
			      void *term_func_priv);

/*
 * key.c
 */
extern bool cachefiles_cook_key(struct cachefiles_object *object);

/*
 * main.c
 */
extern struct kmem_cache *cachefiles_object_jar;

/*
 * namei.c
 */
extern void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					   struct file *file);
extern int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct cachefiles_object *object,
				  struct dentry *dir,
				  struct dentry *rep,
				  enum fscache_why_object_killed why);
extern int cachefiles_delete_object(struct cachefiles_object *object,
				    enum fscache_why_object_killed why);
extern bool cachefiles_look_up_object(struct cachefiles_object *object);
extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					       struct dentry *dir,
					       const char *name,
					       bool *_is_new);
extern void cachefiles_put_directory(struct dentry *dir);

extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
			   char *filename);

extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
				   struct dentry *dir, char *filename);
extern struct file *cachefiles_create_tmpfile(struct cachefiles_object *object);
extern bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
				      struct cachefiles_object *object);

/*
 * ondemand.c
 */
#ifdef CONFIG_CACHEFILES_ONDEMAND
extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					       char __user *_buffer, size_t buflen);

extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache,
				     char *args);

extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache,
				       char *args);

extern int cachefiles_ondemand_init_object(struct cachefiles_object *object);
extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);

extern int cachefiles_ondemand_read(struct cachefiles_object *object,
				    loff_t pos, size_t len);

extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
					     struct cachefiles_volume *volume);
extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj);
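
/*
 * The macro below stamps out a trivial predicate and setter per on-demand
 * object state.  For instance, CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN)
 * expands to cachefiles_ondemand_object_is_open() and
 * cachefiles_ondemand_set_object_open(), both acting on
 * object->ondemand->state.
 */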

#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE)	\
static inline bool						\
cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
{								\
	return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
}								\
								\
static inline void						\
cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
{								\
	object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \
}

CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);

static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
{
	return cachefiles_ondemand_object_is_reopening(req->object) &&
			req->msg.opcode == CACHEFILES_OP_READ;
}

#else
static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
						      char __user *_buffer, size_t buflen)
{
	return -EOPNOTSUPP;
}

static inline int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	return 0;
}

static inline void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
}

static inline int cachefiles_ondemand_read(struct cachefiles_object *object,
					   loff_t pos, size_t len)
{
	return -EOPNOTSUPP;
}

static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj,
						    struct cachefiles_volume *volume)
{
	return 0;
}

static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj)
{
}

static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
{
	return false;
}
#endif

/*
 * security.c
 */
extern int cachefiles_get_security_ID(struct cachefiles_cache *cache);
extern int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
					       struct dentry *root,
					       const struct cred **_saved_cred);

static inline void cachefiles_begin_secure(struct cachefiles_cache *cache,
					   const struct cred **_saved_cred)
{
	*_saved_cred = override_creds(cache->cache_cred);
}

static inline void cachefiles_end_secure(struct cachefiles_cache *cache,
					 const struct cred *saved_cred)
{
	revert_creds(saved_cred);
}
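
/*
 * Illustrative use of the credential override helpers above - a sketch of
 * the expected pattern rather than a copy of any particular caller:
 *
 *	const struct cred *saved_cred;
 *
 *	cachefiles_begin_secure(cache, &saved_cred);
 *	... act on the backing filesystem with the cache's credentials ...
 *	cachefiles_end_secure(cache, saved_cred);
 */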

/*
 * volume.c
 */
void cachefiles_acquire_volume(struct fscache_volume *volume);
void cachefiles_free_volume(struct fscache_volume *volume);
void cachefiles_withdraw_volume(struct cachefiles_volume *volume);

/*
 * xattr.c
 */
extern int cachefiles_set_object_xattr(struct cachefiles_object *object);
extern int cachefiles_check_auxdata(struct cachefiles_object *object,
				    struct file *file);
extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
					  struct cachefiles_object *object,
					  struct dentry *dentry);
extern void cachefiles_prepare_to_write(struct fscache_cookie *cookie);
extern bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume);
extern int cachefiles_check_volume_xattr(struct cachefiles_volume *volume);

/*
 * Error handling
 */
#define cachefiles_io_error(___cache, FMT, ...)		\
do {							\
	pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);	\
	fscache_io_error((___cache)->cache);		\
	set_bit(CACHEFILES_DEAD, &(___cache)->flags);	\
	if (cachefiles_in_ondemand_mode(___cache))	\
		cachefiles_flush_reqs(___cache);	\
} while (0)

#define cachefiles_io_error_obj(object, FMT, ...)			\
do {									\
	struct cachefiles_cache *___cache;				\
									\
	___cache = (object)->volume->cache;				\
	cachefiles_io_error(___cache, FMT " [o=%08x]", ##__VA_ARGS__,	\
			    (object)->debug_id);			\
} while (0)


/*
 * Debug tracing
 */
extern unsigned cachefiles_debug;
#define CACHEFILES_DEBUG_KENTER	1
#define CACHEFILES_DEBUG_KLEAVE	2
#define CACHEFILES_DEBUG_KDEBUG	4

#define dbgprintk(FMT, ...) \
	printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_CACHEFILES_DEBUG)
#define _enter(FMT, ...)				\
do {							\
	if (cachefiles_debug & CACHEFILES_DEBUG_KENTER)	\
		kenter(FMT, ##__VA_ARGS__);		\
} while (0)

#define _leave(FMT, ...)				\
do {							\
	if (cachefiles_debug & CACHEFILES_DEBUG_KLEAVE)	\
		kleave(FMT, ##__VA_ARGS__);		\
} while (0)

#define _debug(FMT, ...)				\
do {							\
	if (cachefiles_debug & CACHEFILES_DEBUG_KDEBUG)	\
		kdebug(FMT, ##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif

#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif