#ifndef _FS_CEPH_MDS_CLIENT_H
#define _FS_CEPH_MDS_CLIENT_H

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>

/*
 * Some lock dependencies:
 *
 * session->s_mutex
 *         mdsc->mutex
 *
 *         mdsc->snap_rwsem
 *
 *         inode->i_lock
 *                 mdsc->snap_flush_lock
 *                 mdsc->cap_delay_lock
 *
 */

struct ceph_fs_client;
struct ceph_cap;

/*
 * parsed info about a single inode.  pointers are into the encoded
 * on-wire structures within the mds reply message payload.
 */
struct ceph_mds_reply_info_in {
        struct ceph_mds_reply_inode *in;
        struct ceph_dir_layout dir_layout;
        u32 symlink_len;
        char *symlink;
        u32 xattr_len;
        char *xattr_data;
};

/*
 * parsed info about an mds reply, including information about
 * either: 1) the target inode and/or its parent directory and dentry,
 * and directory contents (for readdir results), or
 * 2) the file range lock info (for fcntl F_GETLK results).
 */
struct ceph_mds_reply_info_parsed {
        struct ceph_mds_reply_head *head;

        /* trace */
        struct ceph_mds_reply_info_in diri, targeti;
        struct ceph_mds_reply_dirfrag *dirfrag;
        char *dname;
        u32 dname_len;
        struct ceph_mds_reply_lease *dlease;

        /* extra */
        union {
                /* for fcntl F_GETLK results */
                struct ceph_filelock *filelock_reply;

                /* for readdir results */
                struct {
                        struct ceph_mds_reply_dirfrag *dir_dir;
                        int dir_nr;
                        char **dir_dname;
                        u32 *dir_dname_len;
                        struct ceph_mds_reply_lease **dir_dlease;
                        struct ceph_mds_reply_info_in *dir_in;
                        u8 dir_complete, dir_end;
                };
        };

        /* encoded blob describing snapshot contexts for certain
           operations (e.g., open) */
        void *snapblob;
        int snapblob_len;
};

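/*
 * Illustrative sketch (not part of the original header): how a caller
 * might walk the readdir portion of a parsed reply.  The dir_* fields
 * are only meaningful for readdir results; 'rinfo' below is a
 * hypothetical pointer to a filled-in ceph_mds_reply_info_parsed.
 *
 *      int i;
 *
 *      for (i = 0; i < rinfo->dir_nr; i++) {
 *              const char *name = rinfo->dir_dname[i];
 *              u32 len = rinfo->dir_dname_len[i];
 *              struct ceph_mds_reply_info_in *in = &rinfo->dir_in[i];
 *
 *              pr_debug("entry %.*s ino %llx\n", (int)len, name,
 *                       (unsigned long long)le64_to_cpu(in->in->ino));
 *      }
 *      if (!rinfo->dir_end)
 *              pr_debug("more entries remain in this dirfrag\n");
 */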

/*
 * cap releases are batched and sent to the MDS en masse.
 */
#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -                      \
                                sizeof(struct ceph_mds_cap_release)) / \
                               sizeof(struct ceph_mds_cap_item))


/*
 * state associated with each MDS<->client session
 */
enum {
        CEPH_MDS_SESSION_NEW = 1,
        CEPH_MDS_SESSION_OPENING = 2,
        CEPH_MDS_SESSION_OPEN = 3,
        CEPH_MDS_SESSION_HUNG = 4,
        CEPH_MDS_SESSION_CLOSING = 5,
        CEPH_MDS_SESSION_RESTARTING = 6,
        CEPH_MDS_SESSION_RECONNECTING = 7,
};

struct ceph_mds_session {
        struct ceph_mds_client *s_mdsc;
        int s_mds;
        int s_state;
        unsigned long s_ttl;            /* time until mds kills us */
        u64 s_seq;                      /* incoming msg seq # */
        struct mutex s_mutex;           /* serialize session messages */

        struct ceph_connection s_con;

        struct ceph_authorizer *s_authorizer;
        void *s_authorizer_buf, *s_authorizer_reply_buf;
        size_t s_authorizer_buf_len, s_authorizer_reply_buf_len;

        /* protected by s_cap_lock */
        spinlock_t s_cap_lock;
        u32 s_cap_gen;                  /* inc each time we get mds stale msg */
        unsigned long s_cap_ttl;        /* when session caps expire */
        struct list_head s_caps;        /* all caps issued by this session */
        int s_nr_caps, s_trim_caps;
        int s_num_cap_releases;
        struct list_head s_cap_releases;      /* waiting cap_release messages */
        struct list_head s_cap_releases_done; /* ready to send */
        struct ceph_cap *s_cap_iterator;

        /* protected by mutex */
        struct list_head s_cap_flushing;      /* inodes w/ flushing caps */
        struct list_head s_cap_snaps_flushing;
        unsigned long s_renew_requested;      /* last time we sent a renew req */
        u64 s_renew_seq;

        atomic_t s_ref;
        struct list_head s_waiting;     /* waiting requests */
        struct list_head s_unsafe;      /* unsafe requests */
};

/*
 * modes of choosing which MDS to send a request to
 */
enum {
        USE_ANY_MDS,
        USE_RANDOM_MDS,
        USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};

struct ceph_mds_request;
struct ceph_mds_client;

/*
 * request completion callback
 */
typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
                                             struct ceph_mds_request *req);

/*
 * an in-flight mds request
 */
struct ceph_mds_request {
        u64 r_tid;                      /* transaction id */
        struct rb_node r_node;
        struct ceph_mds_client *r_mdsc;

        int r_op;                       /* mds op code */

        /* operation on what? */
        struct inode *r_inode;          /* arg1 */
        struct dentry *r_dentry;        /* arg1 */
        struct dentry *r_old_dentry;    /* arg2: rename from or link from */
        struct inode *r_old_dentry_dir; /* arg2: old dentry's parent dir */
        char *r_path1, *r_path2;
        struct ceph_vino r_ino1, r_ino2;

        struct inode *r_locked_dir;     /* dir (if any) i_mutex locked by vfs */
        struct inode *r_target_inode;   /* resulting inode */

        struct mutex r_fill_mutex;

        union ceph_mds_request_args r_args;
        int r_fmode;                    /* file mode, if expecting cap */
        uid_t r_uid;
        gid_t r_gid;

        /* for choosing which mds to send this request to */
        int r_direct_mode;
        u32 r_direct_hash;      /* choose dir frag based on this dentry hash */
        bool r_direct_is_hash;  /* true if r_direct_hash is valid */

        /* data payload is used for xattr ops */
        struct page **r_pages;
        int r_num_pages;
        int r_data_len;

        /* what caps shall we drop? */
        int r_inode_drop, r_inode_unless;
        int r_dentry_drop, r_dentry_unless;
        int r_old_dentry_drop, r_old_dentry_unless;
        struct inode *r_old_inode;
        int r_old_inode_drop, r_old_inode_unless;

        struct ceph_msg *r_request;     /* original request */
        int r_request_release_offset;
        struct ceph_msg *r_reply;
        struct ceph_mds_reply_info_parsed r_reply_info;
        int r_err;
        bool r_aborted;

        unsigned long r_timeout;        /* optional.  jiffies */
        unsigned long r_started;        /* start time to measure timeout against */
        unsigned long r_request_started; /* start time for mds request only,
                                            used to measure lease durations */

        /* link unsafe requests to parent directory, for fsync */
        struct inode *r_unsafe_dir;
        struct list_head r_unsafe_dir_item;

        struct ceph_mds_session *r_session;

        int r_attempts;                 /* resend attempts */
        int r_num_fwd;                  /* number of forward attempts */
        int r_resend_mds;               /* mds to resend to next, if any */
        u32 r_sent_on_mseq;             /* cap mseq request was sent at */

        struct kref r_kref;
        struct list_head r_wait;
        struct completion r_completion;
        struct completion r_safe_completion;
        ceph_mds_request_callback_t r_callback;
        struct list_head r_unsafe_item; /* per-session unsafe list item */
        bool r_got_unsafe, r_got_safe, r_got_result;

        bool r_did_prepopulate;
        u32 r_readdir_offset;

        struct ceph_cap_reservation r_caps_reservation;
        int r_num_caps;
};

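/*
 * Illustrative sketch (not part of the original header): the typical
 * synchronous request lifecycle, using helpers declared further down
 * in this file.  CEPH_MDS_OP_GETATTR and CEPH_STAT_CAP_INODE come from
 * the shared ceph_fs.h definitions; error handling and the inode
 * reference that real callers take are trimmed for brevity.
 *
 *      struct ceph_mds_request *req;
 *      int err;
 *
 *      req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *                                     USE_ANY_MDS);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      req->r_inode = inode;
 *      req->r_num_caps = 1;
 *      req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
 *      err = ceph_mdsc_do_request(mdsc, NULL, req);
 *      ceph_mdsc_put_request(req);
 *      return err;
 */
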
/*
 * mds client state
 */
struct ceph_mds_client {
        struct ceph_fs_client *fsc;
        struct mutex mutex;             /* all nested structures */

        struct ceph_mdsmap *mdsmap;
        struct completion safe_umount_waiters;
        wait_queue_head_t session_close_wq;
        struct list_head waiting_for_map;

        struct ceph_mds_session **sessions; /* NULL for mds if no session */
        int max_sessions;               /* len of sessions array */
        int stopping;                   /* true if shutting down */

        /*
         * snap_rwsem will cover cap linkage into snaprealms, and
         * realm snap contexts.  (later, we can do per-realm snap
         * context locks..)  the empty list contains realms with no
         * references (implying they contain no inodes with caps) that
         * should be destroyed.
         */
        struct rw_semaphore snap_rwsem;
        struct rb_root snap_realms;
        struct list_head snap_empty;
        spinlock_t snap_empty_lock;     /* protect snap_empty */

        u64 last_tid;                   /* most recent mds request */
        struct rb_root request_tree;    /* pending mds requests */
        struct delayed_work delayed_work; /* delayed work */
        unsigned long last_renew_caps;  /* last time we renewed our caps */
        struct list_head cap_delay_list; /* caps with delayed release */
        spinlock_t cap_delay_lock;      /* protects cap_delay_list */
        struct list_head snap_flush_list; /* cap_snaps ready to flush */
        spinlock_t snap_flush_lock;

        u64 cap_flush_seq;
        struct list_head cap_dirty;     /* inodes with dirty caps */
        struct list_head cap_dirty_migrating; /* ...that are migrating... */
        int num_cap_flushing;           /* # caps we are flushing */
        spinlock_t cap_dirty_lock;      /* protects above items */
        wait_queue_head_t cap_flushing_wq;

        /*
         * Cap reservations
         *
         * Maintain a global pool of preallocated struct ceph_cap objects,
         * referenced by struct ceph_cap_reservation.  This ensures that
         * we preallocate memory needed to successfully process an MDS
         * response.  (If an MDS sends us cap information and we fail to
         * process it, we will have problems due to the client and MDS
         * being out of sync.)
         *
         * Reservations are 'owned' by a ceph_cap_reservation context.
         */
        spinlock_t caps_list_lock;
        struct list_head caps_list;     /* unused (reserved or
                                           unreserved) */
        int caps_total_count;           /* total caps allocated */
        int caps_use_count;             /* in use */
        int caps_reserve_count;         /* unused, reserved */
        int caps_avail_count;           /* unused, unreserved */
        int caps_min_count;             /* keep at least this many
                                           (unreserved) */
        spinlock_t dentry_lru_lock;
        struct list_head dentry_lru;
        int num_dentry;
};

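/*
 * Illustrative note (not part of the original header): the caps_*_count
 * fields above are all updated under caps_list_lock, and are intended
 * to satisfy
 *
 *      caps_total_count == caps_use_count + caps_reserve_count +
 *                          caps_avail_count
 *
 * at all times.  Reserving caps ahead of an MDS request moves entries
 * from the 'avail' pool (allocating more if needed) into 'reserve';
 * handing a cap out against a ceph_cap_reservation moves it from
 * 'reserve' to 'use'.
 */
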
extern const char *ceph_mds_op_name(int op);

extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);

static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
        atomic_inc(&s->s_ref);
        return s;
}

extern void ceph_put_mds_session(struct ceph_mds_session *s);

extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
                             struct ceph_msg *msg, int mds);

extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);

extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);

extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
                                    struct inode *inode,
                                    struct dentry *dn);

extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);

extern struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
                                     struct ceph_mds_request *req);
extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                                struct inode *dir,
                                struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
        kref_get(&req->r_kref);
}
extern void ceph_mdsc_release_request(struct kref *kref);
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
        kref_put(&req->r_kref, ceph_mdsc_release_request);
}

extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session);
extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session);

extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);

extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
                                  int stop_on_nosnap);

extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                                     struct inode *inode,
                                     struct dentry *dentry, char action,
                                     u32 seq);

extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
                                 struct ceph_msg *msg);

extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
                                                  struct ceph_mds_session *session);

#endif
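
/*
 * Illustrative usage note (not part of the original header): sessions
 * and requests are reference counted, so lookups and gets must be
 * paired with puts.  A hypothetical caller that needs a session after
 * dropping mdsc->mutex would do:
 *
 *      mutex_lock(&mdsc->mutex);
 *      s = __ceph_lookup_mds_session(mdsc, mds);   (takes a reference)
 *      mutex_unlock(&mdsc->mutex);
 *      if (s) {
 *              ... use s ...
 *              ceph_put_mds_session(s);
 *      }
 *
 * Similarly, use of a struct ceph_mds_request outside the request tree
 * is typically bracketed by ceph_mdsc_get_request()/ceph_mdsc_put_request().
 */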