/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/t_lock.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/pathname.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/avl.h>
#include <sys/stat.h>
#include <sys/mode.h>

#include <fcntl.h>
#include <unistd.h>

#include "vncache.h"

kmem_cache_t *vn_cache;

/*
 * You can dump this AVL tree with mdb, e.g.
 * vncache_avl ::walk avl |::print -s1 vnode_t
 */
avl_tree_t vncache_avl;
kmutex_t vncache_lock;

/*
 * Vnode cache.
 */

/* ARGSUSED */
static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct vnode *vp;

	vp = buf;
	bzero(vp, sizeof (*vp));

	mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
	vp->v_fd = -1;

	return (0);
}

/* ARGSUSED */
static void
vn_cache_destructor(void *buf, void *cdrarg)
{
	struct vnode *vp;

	vp = buf;

	mutex_destroy(&vp->v_lock);
}

/*
 * Used by file systems when fs-specific nodes (e.g., ufs inodes) are
 * cached by the file system and the vnodes remain associated with them.
 */
void
vn_recycle(vnode_t *vp)
{

	ASSERT(vp->v_fd == -1);

	vp->v_rdcnt = 0;
	vp->v_wrcnt = 0;

	if (vp->v_path) {
		strfree(vp->v_path);
		vp->v_path = NULL;
	}
}


/*
 * Used to reset the vnode fields including those that are directly accessible
 * as well as those which require an accessor function.
 *
 * Does not initialize:
 *	synchronization objects: v_lock, v_vsd_lock, v_nbllock, v_cv
 *	v_data (since FS-nodes and vnodes point to each other and should
 *		be updated simultaneously)
 *	v_op (in case someone needs to make a VOP call on this object)
 */
void
vn_reinit(vnode_t *vp)
{
	vp->v_count = 1;
	vp->v_vfsp = NULL;
	vp->v_stream = NULL;
	vp->v_flag = 0;
	vp->v_type = VNON;
	vp->v_rdev = NODEV;

	vn_recycle(vp);
}

vnode_t *
vn_alloc(int kmflag)
{
	vnode_t *vp;

	vp = kmem_cache_alloc(vn_cache, kmflag);

	if (vp != NULL) {
		vn_reinit(vp);
	}

	return (vp);
}

void
vn_free(vnode_t *vp)
{

	/*
	 * Some file systems call vn_free() with v_count of zero,
	 * some with v_count of 1.  In any case, the value should
	 * never be anything else.
	 */
	ASSERT((vp->v_count == 0) || (vp->v_count == 1));
	if (vp->v_path != NULL) {
		strfree(vp->v_path);
		vp->v_path = NULL;
	}
	ASSERT(vp->v_fd != -1);
	(void) close(vp->v_fd);
	vp->v_fd = -1;

	kmem_cache_free(vn_cache, vp);
}

/*
 * AVL comparator: order vnodes by the (st_dev, st_ino) pair of the
 * underlying file.
 */
int
vncache_cmp(const void *v1, const void *v2)
{
	const vnode_t *vp1, *vp2;

	vp1 = v1;
	vp2 = v2;

	if (vp1->v_st_dev < vp2->v_st_dev)
		return (-1);
	if (vp1->v_st_dev > vp2->v_st_dev)
		return (+1);
	if (vp1->v_st_ino < vp2->v_st_ino)
		return (-1);
	if (vp1->v_st_ino > vp2->v_st_ino)
		return (+1);

	return (0);
}

/*
 * Find a cached vnode by (st_dev, st_ino).
 * Returns a held vnode, or NULL if not found.
 */
vnode_t *
vncache_lookup(struct stat *st)
{
	vnode_t tmp_vn;
	vnode_t *vp;

	tmp_vn.v_st_dev = st->st_dev;
	tmp_vn.v_st_ino = st->st_ino;

	mutex_enter(&vncache_lock);
	vp = avl_find(&vncache_avl, &tmp_vn, NULL);
	if (vp != NULL)
		vn_hold(vp);
	mutex_exit(&vncache_lock);

	return (vp);
}

/*
 * Construct a vnode for the file described by st/fd and enter it in
 * the cache, or return the existing (held) vnode if another thread
 * won the race to insert one for the same file.
 */
vnode_t *
vncache_enter(struct stat *st, vnode_t *dvp, char *name, int fd)
{
	vnode_t *old_vp;
	vnode_t *new_vp;
	vfs_t *vfs;
	char *vpath;
	avl_index_t where;
	int len;

	/*
	 * Fill in v_path
	 * Note: fsop_root() calls with dvp=NULL
	 */
	len = strlen(name) + 1;
	if (dvp == NULL) {
		vpath = kmem_alloc(len, KM_SLEEP);
		(void) strlcpy(vpath, name, len);
		vfs = rootvfs;
	} else {
		/* add to length for parent path + "/" */
		len += (strlen(dvp->v_path) + 1);
		vpath = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(vpath, len, "%s/%s", dvp->v_path, name);
		vfs = dvp->v_vfsp;
	}

	new_vp = vn_alloc(KM_SLEEP);
	new_vp->v_path = vpath;
	new_vp->v_fd = fd;
	new_vp->v_st_dev = st->st_dev;
	new_vp->v_st_ino = st->st_ino;
	new_vp->v_vfsp = vfs;
	new_vp->v_type = IFTOVT(st->st_mode);

	mutex_enter(&vncache_lock);
	old_vp = avl_find(&vncache_avl, new_vp, &where);
	if (old_vp != NULL)
		vn_hold(old_vp);
	else
		avl_insert(&vncache_avl, new_vp, where);
	mutex_exit(&vncache_lock);

	/* If we lost the race, free new_vp */
	if (old_vp != NULL) {
		vn_free(new_vp);
		return (old_vp);
	}

	return (new_vp);
}

/*
 * Called after a successful rename to update v_path
 */
void
vncache_renamed(vnode_t *vp, vnode_t *to_dvp, char *to_name)
{
	char *vpath;
	char *ovpath;
	int len;

	len = strlen(to_name) + 1;
	/* add to length for parent path + "/" */
	len += (strlen(to_dvp->v_path) + 1);
	vpath = kmem_alloc(len, KM_SLEEP);
	(void) snprintf(vpath, len, "%s/%s", to_dvp->v_path, to_name);

	mutex_enter(&vncache_lock);
	ovpath = vp->v_path;
	vp->v_path = vpath;
	mutex_exit(&vncache_lock);

	strfree(ovpath);
}

/*
 * Last reference to this vnode is (possibly) going away.
 * This is normally called by vn_rele() when v_count==1.
 * Note that due to lock order concerns, we have to take
 * the vncache_lock (for the avl tree) and then recheck
 * v_count, which might have gained a ref during the time
 * we did not hold vp->v_lock.
 */
void
vncache_inactive(vnode_t *vp)
{
	uint_t count;

	mutex_enter(&vncache_lock);
	mutex_enter(&vp->v_lock);

	if ((count = vp->v_count) <= 1) {
		/* This is (still) the last ref. */
		avl_remove(&vncache_avl, vp);
	}

	mutex_exit(&vp->v_lock);
	mutex_exit(&vncache_lock);

	if (count <= 1) {
		vn_free(vp);
	}
}

#pragma init(vncache_init)
int
vncache_init(void)
{
	vn_cache = kmem_cache_create("vn_cache", sizeof (struct vnode),
	    VNODE_ALIGN, vn_cache_constructor, vn_cache_destructor, NULL, NULL,
	    NULL, 0);
	avl_create(&vncache_avl,
	    vncache_cmp,
	    sizeof (vnode_t),
	    offsetof(vnode_t, v_avl_node));
	mutex_init(&vncache_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

#pragma fini(vncache_fini)
void
vncache_fini(void)
{
	mutex_destroy(&vncache_lock);
	avl_destroy(&vncache_avl);
	kmem_cache_destroy(vn_cache);
}
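
/*
 * Illustrative usage sketch (a comment only, not part of this file's
 * interface): a minimal sketch of how a caller might consult the cache,
 * assuming it has already obtained "fd" and "st" via open(2)/fstat(2),
 * and that "dvp"/"name" identify the parent directory vnode and the
 * component name.  The close() on a lookup hit assumes the cached vnode
 * already holds its own descriptor (see v_fd and vn_free()).
 *
 *	struct stat st;
 *	vnode_t *vp;
 *	int fd;
 *
 *	fd = open(name, O_RDWR);
 *	if (fd < 0 || fstat(fd, &st) != 0)
 *		return (errno);
 *
 *	vp = vncache_lookup(&st);
 *	if (vp != NULL)
 *		(void) close(fd);	(cached vnode already has an fd)
 *	else
 *		vp = vncache_enter(&st, dvp, name, fd);
 *
 *	... use vp ...
 *	vn_rele(vp);	(drop the hold; may lead to vncache_inactive())
 */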