3 * All rights reserved.
68 /* all fields zero */
107 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
141 rc = -ENOMEM; in nfsd4_create_laundry_wq()
152 return ses->se_dead; in is_session_dead()
157 if (atomic_read(&ses->se_ref) > ref_held_by_me) in mark_session_dead_locked()
159 ses->se_dead = true; in mark_session_dead_locked()
165 return clp->cl_time == 0; in is_client_expired()
171 if (clp->cl_state != NFSD4_ACTIVE) in nfsd4_dec_courtesy_client_count()
172 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); in nfsd4_dec_courtesy_client_count()
177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in get_client_locked()
179 lockdep_assert_held(&nn->client_lock); in get_client_locked()
183 atomic_inc(&clp->cl_rpc_users); in get_client_locked()
185 clp->cl_state = NFSD4_ACTIVE; in get_client_locked()
193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in renew_client_locked()
199 clp->cl_clientid.cl_boot, in renew_client_locked()
200 clp->cl_clientid.cl_id); in renew_client_locked()
204 list_move_tail(&clp->cl_lru, &nn->client_lru); in renew_client_locked()
205 clp->cl_time = ktime_get_boottime_seconds(); in renew_client_locked()
207 clp->cl_state = NFSD4_ACTIVE; in renew_client_locked()
212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in put_client_renew_locked()
214 lockdep_assert_held(&nn->client_lock); in put_client_renew_locked()
216 if (!atomic_dec_and_test(&clp->cl_rpc_users)) in put_client_renew_locked()
226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in put_client_renew()
228 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock)) in put_client_renew()
234 spin_unlock(&nn->client_lock); in put_client_renew()
243 status = get_client_locked(ses->se_client); in nfsd4_get_session_locked()
246 atomic_inc(&ses->se_ref); in nfsd4_get_session_locked()
252 struct nfs4_client *clp = ses->se_client; in nfsd4_put_session_locked()
253 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_put_session_locked()
255 lockdep_assert_held(&nn->client_lock); in nfsd4_put_session_locked()
257 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) in nfsd4_put_session_locked()
264 struct nfs4_client *clp = ses->se_client; in nfsd4_put_session()
265 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_put_session()
267 spin_lock(&nn->client_lock); in nfsd4_put_session()
269 spin_unlock(&nn->client_lock); in nfsd4_put_session()
278 spin_lock(&nn->blocked_locks_lock); in find_blocked_lock()
279 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { in find_blocked_lock()
280 if (fh_match(fh, &cur->nbl_fh)) { in find_blocked_lock()
281 list_del_init(&cur->nbl_list); in find_blocked_lock()
282 WARN_ON(list_empty(&cur->nbl_lru)); in find_blocked_lock()
283 list_del_init(&cur->nbl_lru); in find_blocked_lock()
288 spin_unlock(&nn->blocked_locks_lock); in find_blocked_lock()
290 locks_delete_block(&found->nbl_lock); in find_blocked_lock()
304 INIT_LIST_HEAD(&nbl->nbl_list); in find_or_allocate_block()
305 INIT_LIST_HEAD(&nbl->nbl_lru); in find_or_allocate_block()
306 fh_copy_shallow(&nbl->nbl_fh, fh); in find_or_allocate_block()
307 locks_init_lock(&nbl->nbl_lock); in find_or_allocate_block()
308 kref_init(&nbl->nbl_kref); in find_or_allocate_block()
309 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, in find_or_allocate_block()
323 locks_release_private(&nbl->nbl_lock); in free_nbl()
330 locks_delete_block(&nbl->nbl_lock); in free_blocked_lock()
331 kref_put(&nbl->nbl_kref, free_nbl); in free_blocked_lock()
337 struct nfs4_client *clp = lo->lo_owner.so_client; in remove_blocked_locks()
338 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in remove_blocked_locks()
342 /* Dequeue all blocked locks */ in remove_blocked_locks()
343 spin_lock(&nn->blocked_locks_lock); in remove_blocked_locks()
344 while (!list_empty(&lo->lo_blocked)) { in remove_blocked_locks()
345 nbl = list_first_entry(&lo->lo_blocked, in remove_blocked_locks()
348 list_del_init(&nbl->nbl_list); in remove_blocked_locks()
349 WARN_ON(list_empty(&nbl->nbl_lru)); in remove_blocked_locks()
350 list_move(&nbl->nbl_lru, &reaplist); in remove_blocked_locks()
352 spin_unlock(&nn->blocked_locks_lock); in remove_blocked_locks()
358 list_del_init(&nbl->nbl_lru); in remove_blocked_locks()
368 locks_delete_block(&nbl->nbl_lock); in nfsd4_cb_notify_lock_prepare()
381 switch (task->tk_status) { in nfsd4_cb_notify_lock_done()
382 case -NFS4ERR_DELAY: in nfsd4_cb_notify_lock_done()
412 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
450 stp->st_access_bmap |= mask; in set_access()
460 stp->st_access_bmap &= ~mask; in clear_access()
469 return (bool)(stp->st_access_bmap & mask); in test_access()
479 stp->st_deny_bmap |= mask; in set_deny()
489 stp->st_deny_bmap &= ~mask; in clear_deny()
498 return (bool)(stp->st_deny_bmap & mask); in test_deny()
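The set_access/clear_access/test_access and set_deny/clear_deny/test_deny lines above all follow one pattern: each NFSv4 share-access or share-deny value selects a single bit in a small per-stateid bitmap (st_access_bmap / st_deny_bmap). A minimal standalone sketch of that pattern, with illustrative struct and constant names rather than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    /* NFSv4 share values: READ = 1, WRITE = 2, BOTH = 3 */
    enum { SHARE_READ = 1, SHARE_WRITE = 2, SHARE_BOTH = 3 };

    struct stateid_bmaps {
            unsigned char access_bmap;      /* one bit per share-access value */
            unsigned char deny_bmap;        /* one bit per share-deny value */
    };

    static void set_access(unsigned int access, struct stateid_bmaps *st)
    {
            st->access_bmap |= 1u << access;
    }

    static void clear_access(unsigned int access, struct stateid_bmaps *st)
    {
            st->access_bmap &= ~(1u << access);
    }

    static bool test_access(unsigned int access, const struct stateid_bmaps *st)
    {
            return (st->access_bmap & (1u << access)) != 0;
    }

    int main(void)
    {
            struct stateid_bmaps st = { 0, 0 };

            set_access(SHARE_READ, &st);
            printf("read=%d write=%d\n",
                   test_access(SHARE_READ, &st), test_access(SHARE_WRITE, &st));
            clear_access(SHARE_READ, &st);
            return 0;
    }

The deny-side helpers work the same way, only on deny_bmap.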
533 atomic_inc(&sop->so_count); in nfs4_get_stateowner()
540 return (sop->so_owner.len == owner->len) && in same_owner_str()
541 0 == memcmp(sop->so_owner.data, owner->data, owner->len); in same_owner_str()
550 lockdep_assert_held(&clp->cl_lock); in find_openstateowner_str()
552 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], in find_openstateowner_str()
554 if (!so->so_is_open_owner) in find_openstateowner_str()
556 if (same_owner_str(so, &open->op_owner)) in find_openstateowner_str()
568 while (nbytes--) { in opaque_hashval()
578 if (refcount_dec_and_test(&fi->fi_ref)) { in put_nfs4_file()
580 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); in put_nfs4_file()
581 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); in put_nfs4_file()
591 lockdep_assert_held(&f->fi_lock); in find_writeable_file_locked()
593 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); in find_writeable_file_locked()
595 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_writeable_file_locked()
604 spin_lock(&f->fi_lock); in find_writeable_file()
606 spin_unlock(&f->fi_lock); in find_writeable_file()
616 lockdep_assert_held(&f->fi_lock); in find_readable_file_locked()
618 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); in find_readable_file_locked()
620 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_readable_file_locked()
629 spin_lock(&f->fi_lock); in find_readable_file()
631 spin_unlock(&f->fi_lock); in find_readable_file()
643 spin_lock(&f->fi_lock); in find_any_file()
644 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_any_file()
646 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); in find_any_file()
648 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); in find_any_file()
650 spin_unlock(&f->fi_lock); in find_any_file()
656 lockdep_assert_held(&f->fi_lock); in find_any_file_locked()
658 if (f->fi_fds[O_RDWR]) in find_any_file_locked()
659 return f->fi_fds[O_RDWR]; in find_any_file_locked()
660 if (f->fi_fds[O_WRONLY]) in find_any_file_locked()
661 return f->fi_fds[O_WRONLY]; in find_any_file_locked()
662 if (f->fi_fds[O_RDONLY]) in find_any_file_locked()
663 return f->fi_fds[O_RDONLY]; in find_any_file_locked()
677 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
683 ret = opaque_hashval(ownername->data, ownername->len); in ownerstr_hashval()
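The opaque_hashval()/ownerstr_hashval() lines above hash an opaque owner string and then mask the result into a power-of-two table, which is why OWNER_HASH_MASK is defined as (OWNER_HASH_SIZE - 1). A standalone sketch of that scheme; only the while (nbytes--) loop and the mask pattern appear in this listing, so the table size and byte-mixing constant below are illustrative assumptions:

    #include <stddef.h>

    #define OWNER_HASH_BITS  8
    #define OWNER_HASH_SIZE  (1 << OWNER_HASH_BITS)
    #define OWNER_HASH_MASK  (OWNER_HASH_SIZE - 1)

    static unsigned int opaque_hashval(const void *ptr, size_t nbytes)
    {
            const unsigned char *cptr = ptr;
            unsigned int x = 0;

            while (nbytes--) {
                    x *= 37;        /* illustrative byte-mixing constant */
                    x += *cptr++;
            }
            return x;
    }

    static unsigned int ownerstr_hashval(const void *data, size_t len)
    {
            return opaque_hashval(data, len) & OWNER_HASH_MASK;
    }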
715 * false - access/deny mode conflict with normal client.
716 * true - no conflict or conflict with courtesy client(s) is resolved.
728 lockdep_assert_held(&fp->fi_lock); in nfs4_resolve_deny_conflicts_locked()
729 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { in nfs4_resolve_deny_conflicts_locked()
731 if (st->st_openstp) in nfs4_resolve_deny_conflicts_locked()
736 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap; in nfs4_resolve_deny_conflicts_locked()
739 clp = st->st_stid.sc_client; in nfs4_resolve_deny_conflicts_locked()
746 clp = stp->st_stid.sc_client; in nfs4_resolve_deny_conflicts_locked()
747 nn = net_generic(clp->net, nfsd_net_id); in nfs4_resolve_deny_conflicts_locked()
748 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfs4_resolve_deny_conflicts_locked()
756 lockdep_assert_held(&fp->fi_lock); in __nfs4_file_get_access()
759 atomic_inc(&fp->fi_access[O_WRONLY]); in __nfs4_file_get_access()
761 atomic_inc(&fp->fi_access[O_RDONLY]); in __nfs4_file_get_access()
767 lockdep_assert_held(&fp->fi_lock); in nfs4_file_get_access()
774 if ((access & fp->fi_share_deny) != 0) in nfs4_file_get_access()
790 atomic_read(&fp->fi_access[O_RDONLY])) in nfs4_file_check_deny()
794 atomic_read(&fp->fi_access[O_WRONLY])) in nfs4_file_check_deny()
802 might_lock(&fp->fi_lock); in __nfs4_file_put_access()
804 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { in __nfs4_file_put_access()
808 swap(f1, fp->fi_fds[oflag]); in __nfs4_file_put_access()
809 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) in __nfs4_file_put_access()
810 swap(f2, fp->fi_fds[O_RDWR]); in __nfs4_file_put_access()
811 spin_unlock(&fp->fi_lock); in __nfs4_file_put_access()
833 * Note that we only allocate it for pNFS-enabled exports, otherwise
834 * all pointers to struct nfs4_clnt_odstate are always NULL.
843 co->co_client = clp; in alloc_clnt_odstate()
844 refcount_set(&co->co_odcount, 1); in alloc_clnt_odstate()
852 struct nfs4_file *fp = co->co_file; in hash_clnt_odstate_locked()
854 lockdep_assert_held(&fp->fi_lock); in hash_clnt_odstate_locked()
855 list_add(&co->co_perfile, &fp->fi_clnt_odstate); in hash_clnt_odstate_locked()
862 refcount_inc(&co->co_odcount); in get_clnt_odstate()
873 fp = co->co_file; in put_clnt_odstate()
874 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { in put_clnt_odstate()
875 list_del(&co->co_perfile); in put_clnt_odstate()
876 spin_unlock(&fp->fi_lock); in put_clnt_odstate()
878 nfsd4_return_all_file_layouts(co->co_client, fp); in put_clnt_odstate()
892 cl = new->co_client; in find_or_hash_clnt_odstate()
894 spin_lock(&fp->fi_lock); in find_or_hash_clnt_odstate()
895 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { in find_or_hash_clnt_odstate()
896 if (co->co_client == cl) { in find_or_hash_clnt_odstate()
902 co->co_file = fp; in find_or_hash_clnt_odstate()
905 spin_unlock(&fp->fi_lock); in find_or_hash_clnt_odstate()
920 spin_lock(&cl->cl_lock); in nfs4_alloc_stid()
922 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); in nfs4_alloc_stid()
923 spin_unlock(&cl->cl_lock); in nfs4_alloc_stid()
928 stid->sc_free = sc_free; in nfs4_alloc_stid()
929 stid->sc_client = cl; in nfs4_alloc_stid()
930 stid->sc_stateid.si_opaque.so_id = new_id; in nfs4_alloc_stid()
931 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; in nfs4_alloc_stid()
933 refcount_set(&stid->sc_count, 1); in nfs4_alloc_stid()
934 spin_lock_init(&stid->sc_lock); in nfs4_alloc_stid()
935 INIT_LIST_HEAD(&stid->sc_cp_list); in nfs4_alloc_stid()
951 stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time; in nfs4_init_cp_state()
952 stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; in nfs4_init_cp_state()
955 spin_lock(&nn->s2s_cp_lock); in nfs4_init_cp_state()
956 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); in nfs4_init_cp_state()
957 stid->cs_stid.si_opaque.so_id = new_id; in nfs4_init_cp_state()
958 stid->cs_stid.si_generation = 1; in nfs4_init_cp_state()
959 spin_unlock(&nn->s2s_cp_lock); in nfs4_init_cp_state()
963 stid->cs_type = cs_type; in nfs4_init_cp_state()
 969	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID); in nfs4_init_copy_state()
980 cps->cpntf_time = ktime_get_boottime_seconds(); in nfs4_alloc_init_cpntf_state()
981 refcount_set(&cps->cp_stateid.cs_count, 1); in nfs4_alloc_init_cpntf_state()
982 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID)) in nfs4_alloc_init_cpntf_state()
984 spin_lock(&nn->s2s_cp_lock); in nfs4_alloc_init_cpntf_state()
985 list_add(&cps->cp_list, &p_stid->sc_cp_list); in nfs4_alloc_init_cpntf_state()
986 spin_unlock(&nn->s2s_cp_lock); in nfs4_alloc_init_cpntf_state()
997 if (copy->cp_stateid.cs_type != NFS4_COPY_STID) in nfs4_free_copy_state()
999 nn = net_generic(copy->cp_clp->net, nfsd_net_id); in nfs4_free_copy_state()
1000 spin_lock(&nn->s2s_cp_lock); in nfs4_free_copy_state()
1001 idr_remove(&nn->s2s_cp_stateids, in nfs4_free_copy_state()
1002 copy->cp_stateid.cs_stid.si_opaque.so_id); in nfs4_free_copy_state()
1003 spin_unlock(&nn->s2s_cp_lock); in nfs4_free_copy_state()
1012 spin_lock(&nn->s2s_cp_lock); in nfs4_free_cpntf_statelist()
1013 while (!list_empty(&stid->sc_cp_list)) { in nfs4_free_cpntf_statelist()
1014 cps = list_first_entry(&stid->sc_cp_list, in nfs4_free_cpntf_statelist()
1018 spin_unlock(&nn->s2s_cp_lock); in nfs4_free_cpntf_statelist()
1035 * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
1042 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list)); in nfs4_free_deleg()
1043 WARN_ON_ONCE(!list_empty(&dp->dl_perfile)); in nfs4_free_deleg()
1044 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt)); in nfs4_free_deleg()
1045 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru)); in nfs4_free_deleg()
1063 * low 3 bytes as hash-table indices.
1082 if (bd->entries == 0) in delegation_blocked()
1084 if (ktime_get_seconds() - bd->swap_time > 30) { in delegation_blocked()
1086 if (ktime_get_seconds() - bd->swap_time > 30) { in delegation_blocked()
1087 bd->entries -= bd->old_entries; in delegation_blocked()
1088 bd->old_entries = bd->entries; in delegation_blocked()
1089 bd->new = 1-bd->new; in delegation_blocked()
1090 memset(bd->set[bd->new], 0, in delegation_blocked()
1091 sizeof(bd->set[0])); in delegation_blocked()
1092 bd->swap_time = ktime_get_seconds(); in delegation_blocked()
1096 hash = jhash(&fh->fh_raw, fh->fh_size, 0); in delegation_blocked()
1097 if (test_bit(hash&255, bd->set[0]) && in delegation_blocked()
1098 test_bit((hash>>8)&255, bd->set[0]) && in delegation_blocked()
1099 test_bit((hash>>16)&255, bd->set[0])) in delegation_blocked()
1102 if (test_bit(hash&255, bd->set[1]) && in delegation_blocked()
1103 test_bit((hash>>8)&255, bd->set[1]) && in delegation_blocked()
1104 test_bit((hash>>16)&255, bd->set[1])) in delegation_blocked()
1115 hash = jhash(&fh->fh_raw, fh->fh_size, 0); in block_delegations()
1118 __set_bit(hash&255, bd->set[bd->new]); in block_delegations()
1119 __set_bit((hash>>8)&255, bd->set[bd->new]); in block_delegations()
1120 __set_bit((hash>>16)&255, bd->set[bd->new]); in block_delegations()
1121 if (bd->entries == 0) in block_delegations()
1122 bd->swap_time = ktime_get_seconds(); in block_delegations()
1123 bd->entries += 1; in block_delegations()
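Taken together, the delegation_blocked()/block_delegations() lines above describe a small two-generation Bloom filter keyed on the file handle: jhash() of the handle yields a 32-bit value whose low three bytes index 256-bit sets, and the two sets swap roughly every 30 seconds so stale entries age out. A self-contained sketch of that idea; the hash function and time source are stand-ins, and the kernel's spinlock plus its re-check of swap_time under the lock are omitted:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <time.h>

    struct bloom_pair {
            int entries, old_entries;
            time_t swap_time;
            int new;                    /* which of set[] receives new entries */
            unsigned char set[2][32];   /* two 256-bit bitmaps */
    };

    static uint32_t toy_hash(const void *p, size_t len)  /* stand-in for jhash() */
    {
            const unsigned char *b = p;
            uint32_t h = 2166136261u;
            while (len--)
                    h = (h ^ *b++) * 16777619u;
            return h;
    }

    static void set_bit8(unsigned char *map, unsigned int bit)
    {
            map[bit / 8] |= 1u << (bit % 8);
    }

    static bool test_bit8(const unsigned char *map, unsigned int bit)
    {
            return map[bit / 8] & (1u << (bit % 8));
    }

    static bool hits(const unsigned char *map, uint32_t hash)
    {
            return test_bit8(map, hash & 255) &&
                   test_bit8(map, (hash >> 8) & 255) &&
                   test_bit8(map, (hash >> 16) & 255);
    }

    static bool delegation_blocked(struct bloom_pair *bd, const void *fh, size_t len)
    {
            uint32_t hash;

            if (bd->entries == 0)
                    return false;
            if (time(NULL) - bd->swap_time > 30) {      /* age out the old set */
                    bd->entries -= bd->old_entries;
                    bd->old_entries = bd->entries;
                    bd->new = 1 - bd->new;
                    memset(bd->set[bd->new], 0, sizeof(bd->set[0]));
                    bd->swap_time = time(NULL);
            }
            hash = toy_hash(fh, len);
            return hits(bd->set[0], hash) || hits(bd->set[1], hash);
    }

    static void block_delegations(struct bloom_pair *bd, const void *fh, size_t len)
    {
            uint32_t hash = toy_hash(fh, len);

            set_bit8(bd->set[bd->new], hash & 255);
            set_bit8(bd->set[bd->new], (hash >> 8) & 255);
            set_bit8(bd->set[bd->new], (hash >> 16) & 255);
            if (bd->entries == 0)
                    bd->swap_time = time(NULL);
            bd->entries += 1;
    }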
1139 if (delegation_blocked(&fp->fi_fhandle)) in alloc_init_deleg()
1151 dp->dl_stid.sc_stateid.si_generation = 1; in alloc_init_deleg()
1152 INIT_LIST_HEAD(&dp->dl_perfile); in alloc_init_deleg()
1153 INIT_LIST_HEAD(&dp->dl_perclnt); in alloc_init_deleg()
1154 INIT_LIST_HEAD(&dp->dl_recall_lru); in alloc_init_deleg()
1155 dp->dl_clnt_odstate = odstate; in alloc_init_deleg()
1157 dp->dl_type = dl_type; in alloc_init_deleg()
1158 dp->dl_retries = 1; in alloc_init_deleg()
1159 dp->dl_recalled = false; in alloc_init_deleg()
1160 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, in alloc_init_deleg()
1162 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client, in alloc_init_deleg()
1164 dp->dl_cb_fattr.ncf_file_modified = false; in alloc_init_deleg()
1166 dp->dl_stid.sc_file = fp; in alloc_init_deleg()
1176 struct nfs4_file *fp = s->sc_file; in nfs4_put_stid()
1177 struct nfs4_client *clp = s->sc_client; in nfs4_put_stid()
1179 might_lock(&clp->cl_lock); in nfs4_put_stid()
1181 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { in nfs4_put_stid()
1185 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); in nfs4_put_stid()
1186 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_put_stid()
1187 atomic_dec(&s->sc_client->cl_admin_revoked); in nfs4_put_stid()
1188 nfs4_free_cpntf_statelist(clp->net, s); in nfs4_put_stid()
1189 spin_unlock(&clp->cl_lock); in nfs4_put_stid()
1190 s->sc_free(s); in nfs4_put_stid()
1198 stateid_t *src = &stid->sc_stateid; in nfs4_inc_and_copy_stateid()
1200 spin_lock(&stid->sc_lock); in nfs4_inc_and_copy_stateid()
1201 if (unlikely(++src->si_generation == 0)) in nfs4_inc_and_copy_stateid()
1202 src->si_generation = 1; in nfs4_inc_and_copy_stateid()
1204 spin_unlock(&stid->sc_lock); in nfs4_inc_and_copy_stateid()
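The nfs4_inc_and_copy_stateid() lines above bump the stateid's 32-bit generation under sc_lock and skip the value 0 on wrap, since a seqid of zero is treated specially by NFSv4 clients and servers. A tiny illustration of just the wrap rule (locking omitted, name illustrative):

    #include <stdint.h>

    static uint32_t bump_generation(uint32_t gen)
    {
            /* generation 0 is reserved, so wrap 0xffffffff -> 1, not 0 */
            if (++gen == 0)
                    gen = 1;
            return gen;
    }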
1212 spin_lock(&fp->fi_lock); in put_deleg_file()
1213 if (--fp->fi_delegees == 0) { in put_deleg_file()
1214 swap(nf, fp->fi_deleg_file); in put_deleg_file()
1215 swap(rnf, fp->fi_rdeleg_file); in put_deleg_file()
1217 spin_unlock(&fp->fi_lock); in put_deleg_file()
1232 if ((READ_ONCE(f->f_mode) & FMODE_NOCMTIME) == 0) in nfsd4_finalize_deleg_timestamps()
1235 spin_lock(&f->f_lock); in nfsd4_finalize_deleg_timestamps()
1236 f->f_mode &= ~FMODE_NOCMTIME; in nfsd4_finalize_deleg_timestamps()
1237 spin_unlock(&f->f_lock); in nfsd4_finalize_deleg_timestamps()
1240 if (!dp->dl_written) in nfsd4_finalize_deleg_timestamps()
1244 if (dp->dl_setattr) in nfsd4_finalize_deleg_timestamps()
1249 ret = notify_change(&nop_mnt_idmap, f->f_path.dentry, &ia, NULL); in nfsd4_finalize_deleg_timestamps()
1255 MAJOR(inode->i_sb->s_dev), in nfsd4_finalize_deleg_timestamps()
1256 MINOR(inode->i_sb->s_dev), in nfsd4_finalize_deleg_timestamps()
1257 inode->i_ino, ret); in nfsd4_finalize_deleg_timestamps()
1263 struct nfs4_file *fp = dp->dl_stid.sc_file; in nfs4_unlock_deleg_lease()
1264 struct nfsd_file *nf = fp->fi_deleg_file; in nfs4_unlock_deleg_lease()
1266 WARN_ON_ONCE(!fp->fi_delegees); in nfs4_unlock_deleg_lease()
1268 nfsd4_finalize_deleg_timestamps(dp, nf->nf_file); in nfs4_unlock_deleg_lease()
1269 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); in nfs4_unlock_deleg_lease()
1275 put_clnt_odstate(dp->dl_clnt_odstate); in destroy_unhashed_deleg()
1277 nfs4_put_stid(&dp->dl_stid); in destroy_unhashed_deleg()
1281 * nfs4_delegation_exists - Discover if this delegation already exists
1296 lockdep_assert_held(&fp->fi_lock); in nfs4_delegation_exists()
1298 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { in nfs4_delegation_exists()
1299 searchclp = searchdp->dl_stid.sc_client; in nfs4_delegation_exists()
1308 * hash_delegation_locked - Add a delegation to the appropriate lists
1315 * On error: -EAGAIN if one was previously granted to this
1323 struct nfs4_client *clp = dp->dl_stid.sc_client; in hash_delegation_locked()
1326 lockdep_assert_held(&fp->fi_lock); in hash_delegation_locked()
1327 lockdep_assert_held(&clp->cl_lock); in hash_delegation_locked()
1330 return -EAGAIN; in hash_delegation_locked()
1331 refcount_inc(&dp->dl_stid.sc_count); in hash_delegation_locked()
1332 dp->dl_stid.sc_type = SC_TYPE_DELEG; in hash_delegation_locked()
1333 list_add(&dp->dl_perfile, &fp->fi_delegations); in hash_delegation_locked()
1334 list_add(&dp->dl_perclnt, &clp->cl_delegations); in hash_delegation_locked()
1340 return !(list_empty(&dp->dl_perfile)); in delegation_hashed()
1346 struct nfs4_file *fp = dp->dl_stid.sc_file; in unhash_delegation_locked()
1354 dp->dl_stid.sc_client->cl_minorversion == 0) in unhash_delegation_locked()
1356 dp->dl_stid.sc_status |= statusmask; in unhash_delegation_locked()
1358 atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked); in unhash_delegation_locked()
1361 ++dp->dl_time; in unhash_delegation_locked()
1362 spin_lock(&fp->fi_lock); in unhash_delegation_locked()
1363 list_del_init(&dp->dl_perclnt); in unhash_delegation_locked()
1364 list_del_init(&dp->dl_recall_lru); in unhash_delegation_locked()
1365 list_del_init(&dp->dl_perfile); in unhash_delegation_locked()
1366 spin_unlock(&fp->fi_lock); in unhash_delegation_locked()
1382 * revoke_delegation - perform nfs4 delegation structure cleanup
1401 * in the revocation process is protected by the clp->cl_lock.
1405 struct nfs4_client *clp = dp->dl_stid.sc_client; in revoke_delegation()
1407 WARN_ON(!list_empty(&dp->dl_recall_lru)); in revoke_delegation()
1408 WARN_ON_ONCE(dp->dl_stid.sc_client->cl_minorversion > 0 && in revoke_delegation()
1409 !(dp->dl_stid.sc_status & in revoke_delegation()
1412 trace_nfsd_stid_revoke(&dp->dl_stid); in revoke_delegation()
1414 spin_lock(&clp->cl_lock); in revoke_delegation()
1415 if (dp->dl_stid.sc_status & SC_STATUS_FREED) { in revoke_delegation()
1416 list_del_init(&dp->dl_recall_lru); in revoke_delegation()
1419 list_add(&dp->dl_recall_lru, &clp->cl_revoked); in revoke_delegation()
1420 dp->dl_stid.sc_status |= SC_STATUS_FREEABLE; in revoke_delegation()
1422 spin_unlock(&clp->cl_lock); in revoke_delegation()
1450 spin_lock(&fp->fi_lock); in recalculate_deny_mode()
1451 old_deny = fp->fi_share_deny; in recalculate_deny_mode()
1452 fp->fi_share_deny = 0; in recalculate_deny_mode()
1453 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { in recalculate_deny_mode()
1454 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); in recalculate_deny_mode()
1455 if (fp->fi_share_deny == old_deny) in recalculate_deny_mode()
1458 spin_unlock(&fp->fi_lock); in recalculate_deny_mode()
1474 /* Recalculate per-file deny mode if there was a change */ in reset_union_bmap_deny()
1476 recalculate_deny_mode(stp->st_stid.sc_file); in reset_union_bmap_deny()
1479 /* release all access and file references for a given stateid */
1484 struct nfs4_file *fp = stp->st_stid.sc_file; in release_all_access()
1486 if (fp && stp->st_deny_bmap != 0) in release_all_access()
1491 nfs4_file_put_access(stp->st_stid.sc_file, i); in release_all_access()
1498 kfree(sop->so_owner.data); in nfs4_free_stateowner()
1499 sop->so_ops->so_free(sop); in nfs4_free_stateowner()
1504 struct nfs4_client *clp = sop->so_client; in nfs4_put_stateowner()
1506 might_lock(&clp->cl_lock); in nfs4_put_stateowner()
1508 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) in nfs4_put_stateowner()
1510 sop->so_ops->so_unhash(sop); in nfs4_put_stateowner()
1511 spin_unlock(&clp->cl_lock); in nfs4_put_stateowner()
1518 return list_empty(&stp->st_perfile); in nfs4_ol_stateid_unhashed()
1523 struct nfs4_file *fp = stp->st_stid.sc_file; in unhash_ol_stateid()
1525 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); in unhash_ol_stateid()
1527 if (list_empty(&stp->st_perfile)) in unhash_ol_stateid()
1530 spin_lock(&fp->fi_lock); in unhash_ol_stateid()
1531 list_del_init(&stp->st_perfile); in unhash_ol_stateid()
1532 spin_unlock(&fp->fi_lock); in unhash_ol_stateid()
1533 list_del(&stp->st_perstateowner); in unhash_ol_stateid()
1541 put_clnt_odstate(stp->st_clnt_odstate); in nfs4_free_ol_stateid()
1543 if (stp->st_stateowner) in nfs4_free_ol_stateid()
1544 nfs4_put_stateowner(stp->st_stateowner); in nfs4_free_ol_stateid()
1545 if (!list_empty(&stid->sc_cp_list)) in nfs4_free_ol_stateid()
1546 nfs4_free_cpntf_statelist(stid->sc_client->net, stid); in nfs4_free_ol_stateid()
1553 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); in nfs4_free_lock_stateid()
1556 nf = find_any_file(stp->st_stid.sc_file); in nfs4_free_lock_stateid()
1558 get_file(nf->nf_file); in nfs4_free_lock_stateid()
1559 filp_close(nf->nf_file, (fl_owner_t)lo); in nfs4_free_lock_stateid()
1573 struct nfs4_stid *s = &stp->st_stid; in put_ol_stateid_locked()
1574 struct nfs4_client *clp = s->sc_client; in put_ol_stateid_locked()
1576 lockdep_assert_held(&clp->cl_lock); in put_ol_stateid_locked()
1578 WARN_ON_ONCE(!list_empty(&stp->st_locks)); in put_ol_stateid_locked()
1580 if (!refcount_dec_and_test(&s->sc_count)) { in put_ol_stateid_locked()
1585 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); in put_ol_stateid_locked()
1586 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) in put_ol_stateid_locked()
1587 atomic_dec(&s->sc_client->cl_admin_revoked); in put_ol_stateid_locked()
1588 list_add(&stp->st_locks, reaplist); in put_ol_stateid_locked()
1593 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); in unhash_lock_stateid()
1597 list_del_init(&stp->st_locks); in unhash_lock_stateid()
1598 stp->st_stid.sc_status |= SC_STATUS_CLOSED; in unhash_lock_stateid()
1604 struct nfs4_client *clp = stp->st_stid.sc_client; in release_lock_stateid()
1607 spin_lock(&clp->cl_lock); in release_lock_stateid()
1609 spin_unlock(&clp->cl_lock); in release_lock_stateid()
1611 nfs4_put_stid(&stp->st_stid); in release_lock_stateid()
1616 struct nfs4_client *clp = lo->lo_owner.so_client; in unhash_lockowner_locked()
1618 lockdep_assert_held(&clp->cl_lock); in unhash_lockowner_locked()
1620 list_del_init(&lo->lo_owner.so_strhash); in unhash_lockowner_locked()
1638 list_del(&stp->st_locks); in free_ol_stateid_reaplist()
1639 fp = stp->st_stid.sc_file; in free_ol_stateid_reaplist()
1640 stp->st_stid.sc_free(&stp->st_stid); in free_ol_stateid_reaplist()
1651 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); in release_open_stateid_locks()
1653 while (!list_empty(&open_stp->st_locks)) { in release_open_stateid_locks()
1654 stp = list_entry(open_stp->st_locks.next, in release_open_stateid_locks()
1664 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); in unhash_open_stateid()
1676 spin_lock(&stp->st_stid.sc_client->cl_lock); in release_open_stateid()
1677 stp->st_stid.sc_status |= SC_STATUS_CLOSED; in release_open_stateid()
1680 spin_unlock(&stp->st_stid.sc_client->cl_lock); in release_open_stateid()
1686 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); in nfs4_openowner_unhashed()
1688 return list_empty(&oo->oo_owner.so_strhash) && in nfs4_openowner_unhashed()
1689 list_empty(&oo->oo_perclient); in nfs4_openowner_unhashed()
1694 struct nfs4_client *clp = oo->oo_owner.so_client; in unhash_openowner_locked()
1696 lockdep_assert_held(&clp->cl_lock); in unhash_openowner_locked()
1698 list_del_init(&oo->oo_owner.so_strhash); in unhash_openowner_locked()
1699 list_del_init(&oo->oo_perclient); in unhash_openowner_locked()
1704 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, in release_last_closed_stateid()
1708 spin_lock(&nn->client_lock); in release_last_closed_stateid()
1709 s = oo->oo_last_closed_stid; in release_last_closed_stateid()
1711 list_del_init(&oo->oo_close_lru); in release_last_closed_stateid()
1712 oo->oo_last_closed_stid = NULL; in release_last_closed_stateid()
1714 spin_unlock(&nn->client_lock); in release_last_closed_stateid()
1716 nfs4_put_stid(&s->st_stid); in release_last_closed_stateid()
1722 struct nfs4_client *clp = oo->oo_owner.so_client; in release_openowner()
1725 spin_lock(&clp->cl_lock); in release_openowner()
1727 while (!list_empty(&oo->oo_owner.so_stateids)) { in release_openowner()
1728 stp = list_first_entry(&oo->oo_owner.so_stateids, in release_openowner()
1733 spin_unlock(&clp->cl_lock); in release_openowner()
1736 nfs4_put_stateowner(&oo->oo_owner); in release_openowner()
1746 spin_lock(&clp->cl_lock); in find_one_sb_stid()
1747 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id) in find_one_sb_stid()
1748 if ((stid->sc_type & sc_types) && in find_one_sb_stid()
1749 stid->sc_status == 0 && in find_one_sb_stid()
1750 stid->sc_file->fi_inode->i_sb == sb) { in find_one_sb_stid()
1751 refcount_inc(&stid->sc_count); in find_one_sb_stid()
1754 spin_unlock(&clp->cl_lock); in find_one_sb_stid()
1759 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
1763 * All nfs4 states (open, lock, delegation, layout) held by the server instance
1765 * in any files being closed and so all references from nfsd to the filesystem
1770 * states have been "admin-revoked".
1780 spin_lock(&nn->client_lock); in nfsd4_revoke_states()
1782 struct list_head *head = &nn->conf_id_hashtbl[idhashval]; in nfsd4_revoke_states()
1793 spin_unlock(&nn->client_lock); in nfsd4_revoke_states()
1794 switch (stid->sc_type) { in nfsd4_revoke_states()
1797 mutex_lock_nested(&stp->st_mutex, in nfsd4_revoke_states()
1800 spin_lock(&clp->cl_lock); in nfsd4_revoke_states()
1801 if (stid->sc_status == 0) { in nfsd4_revoke_states()
1802 stid->sc_status |= in nfsd4_revoke_states()
1804 atomic_inc(&clp->cl_admin_revoked); in nfsd4_revoke_states()
1805 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1808 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1809 mutex_unlock(&stp->st_mutex); in nfsd4_revoke_states()
1813 mutex_lock_nested(&stp->st_mutex, in nfsd4_revoke_states()
1815 spin_lock(&clp->cl_lock); in nfsd4_revoke_states()
1816 if (stid->sc_status == 0) { in nfsd4_revoke_states()
1818 lockowner(stp->st_stateowner); in nfsd4_revoke_states()
1821 stid->sc_status |= in nfsd4_revoke_states()
1823 atomic_inc(&clp->cl_admin_revoked); in nfsd4_revoke_states()
1824 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1825 nf = find_any_file(stp->st_stid.sc_file); in nfsd4_revoke_states()
1827 get_file(nf->nf_file); in nfsd4_revoke_states()
1828 filp_close(nf->nf_file, in nfsd4_revoke_states()
1834 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1835 mutex_unlock(&stp->st_mutex); in nfsd4_revoke_states()
1838 refcount_inc(&stid->sc_count); in nfsd4_revoke_states()
1854 spin_lock(&nn->client_lock); in nfsd4_revoke_states()
1855 if (clp->cl_minorversion == 0) in nfsd4_revoke_states()
1861 nn->nfs40_last_revoke = in nfsd4_revoke_states()
1867 spin_unlock(&nn->client_lock); in nfsd4_revoke_states()
1875 return sid->sequence % SESSION_HASH_SIZE; in hash_sessionid()
1882 u32 *ptr = (u32 *)(&sessionid->data[0]); in dump_sessionid()
1893 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1898 struct nfs4_stateowner *so = cstate->replay_owner; in nfsd4_bump_seqid()
1909 if (so->so_is_open_owner) in nfsd4_bump_seqid()
1911 so->so_seqid++; in nfsd4_bump_seqid()
1918 struct nfs4_client *clp = ses->se_client; in gen_sessionid()
1921 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; in gen_sessionid()
1922 sid->clientid = clp->cl_clientid; in gen_sessionid()
1923 sid->sequence = current_sessionid++; in gen_sessionid()
1924 sid->reserved = 0; in gen_sessionid()
1929 * the rpc header, but all we need to cache is the data starting after
1930 * the end of the initial SEQUENCE operation--the rest we regenerate
1936 * verifier), 12 for the compound header (with zero-length tag), and 44
1944 /* The sum of "target_slots-1" on every session. The shrinker can push this
1946 * be freed. The "-1" is because we can never free slot 0 while the
1956 if (from >= ses->se_fchannel.maxreqs) in free_session_slots()
1959 for (i = from; i < ses->se_fchannel.maxreqs; i++) { in free_session_slots()
1960 struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); in free_session_slots()
1967 xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0); in free_session_slots()
1968 free_svc_cred(&slot->sl_cred); in free_session_slots()
1971 ses->se_fchannel.maxreqs = from; in free_session_slots()
1972 if (ses->se_target_maxslots > from) { in free_session_slots()
1974 atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots); in free_session_slots()
1975 ses->se_target_maxslots = new_target; in free_session_slots()
1980 * reduce_session_slots - reduce the target max-slots of a session if possible
1984 * This interface can be used by a shrinker to reduce the target max-slots
1988  * best-effort, skipping a session if client_lock is already held has no
1997 struct nfsd_net *nn = net_generic(ses->se_client->net, in reduce_session_slots()
2001 if (ses->se_target_maxslots <= 1) in reduce_session_slots()
2003 if (!spin_trylock(&nn->client_lock)) in reduce_session_slots()
2005 ret = min(dec, ses->se_target_maxslots-1); in reduce_session_slots()
2006 ses->se_target_maxslots -= ret; in reduce_session_slots()
2008 ses->se_slot_gen += 1; in reduce_session_slots()
2009 if (ses->se_slot_gen == 0) { in reduce_session_slots()
2011 ses->se_slot_gen = 1; in reduce_session_slots()
2012 for (i = 0; i < ses->se_fchannel.maxreqs; i++) { in reduce_session_slots()
2013 struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); in reduce_session_slots()
2014 slot->sl_generation = 0; in reduce_session_slots()
2017 spin_unlock(&nn->client_lock); in reduce_session_slots()
2031 size = fattrs->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ ? in nfsd4_alloc_slot()
2032 0 : fattrs->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; in nfsd4_alloc_slot()
2037 slot->sl_index = index; in nfsd4_alloc_slot()
2044 int numslots = fattrs->maxreqs; in alloc_session()
2052 xa_init(&new->se_slots); in alloc_session()
2055 if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL))) in alloc_session()
2063 if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) { in alloc_session()
2068 fattrs->maxreqs = i; in alloc_session()
2069 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); in alloc_session()
2070 new->se_target_maxslots = i; in alloc_session()
2071 atomic_add(i - 1, &nfsd_total_target_slots); in alloc_session()
2072 new->se_cb_slot_avail = ~0U; in alloc_session()
2073 new->se_cb_highest_slot = min(battrs->maxreqs - 1, in alloc_session()
2074 NFSD_BC_SLOT_TABLE_SIZE - 1); in alloc_session()
2075 spin_lock_init(&new->se_lock); in alloc_session()
2079 xa_destroy(&new->se_slots); in alloc_session()
2086 svc_xprt_put(c->cn_xprt); in free_conn()
2093 struct nfs4_client *clp = c->cn_session->se_client; in nfsd4_conn_lost()
2097 spin_lock(&clp->cl_lock); in nfsd4_conn_lost()
2098 if (!list_empty(&c->cn_persession)) { in nfsd4_conn_lost()
2099 list_del(&c->cn_persession); in nfsd4_conn_lost()
2103 spin_unlock(&clp->cl_lock); in nfsd4_conn_lost()
2113 svc_xprt_get(rqstp->rq_xprt); in alloc_conn()
2114 conn->cn_xprt = rqstp->rq_xprt; in alloc_conn()
2115 conn->cn_flags = flags; in alloc_conn()
2116 INIT_LIST_HEAD(&conn->cn_xpt_user.list); in alloc_conn()
2122 conn->cn_session = ses; in __nfsd4_hash_conn()
2123 list_add(&conn->cn_persession, &ses->se_conns); in __nfsd4_hash_conn()
2128 struct nfs4_client *clp = ses->se_client; in nfsd4_hash_conn()
2130 spin_lock(&clp->cl_lock); in nfsd4_hash_conn()
2132 spin_unlock(&clp->cl_lock); in nfsd4_hash_conn()
2137 conn->cn_xpt_user.callback = nfsd4_conn_lost; in nfsd4_register_conn()
2138 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); in nfsd4_register_conn()
2149 nfsd4_conn_lost(&conn->cn_xpt_user); in nfsd4_init_conn()
2151 nfsd4_probe_callback_sync(ses->se_client); in nfsd4_init_conn()
2158 if (cses->flags & SESSION4_BACK_CHAN) in alloc_conn_from_crses()
2166 struct nfs4_client *clp = s->se_client; in nfsd4_del_conns()
2169 spin_lock(&clp->cl_lock); in nfsd4_del_conns()
2170 while (!list_empty(&s->se_conns)) { in nfsd4_del_conns()
2171 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); in nfsd4_del_conns()
2172 list_del_init(&c->cn_persession); in nfsd4_del_conns()
2173 spin_unlock(&clp->cl_lock); in nfsd4_del_conns()
2175 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); in nfsd4_del_conns()
2178 spin_lock(&clp->cl_lock); in nfsd4_del_conns()
2180 spin_unlock(&clp->cl_lock); in nfsd4_del_conns()
2186 xa_destroy(&ses->se_slots); in __free_session()
2215 if (scanned >= sc->nr_to_scan) { in nfsd_slot_scan()
2217 list_move(&nfsd_session_list, &ses->se_all_sessions); in nfsd_slot_scan()
2222 sc->nr_scanned = scanned; in nfsd_slot_scan()
2231 new->se_client = clp; in init_session()
2234 INIT_LIST_HEAD(&new->se_conns); in init_session()
2236 atomic_set(&new->se_ref, 0); in init_session()
2237 new->se_dead = false; in init_session()
2238 new->se_cb_prog = cses->callback_prog; in init_session()
2239 new->se_cb_sec = cses->cb_sec; in init_session()
2242 new->se_cb_seq_nr[idx] = 1; in init_session()
2244 idx = hash_sessionid(&new->se_sessionid); in init_session()
2245 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); in init_session()
2246 spin_lock(&clp->cl_lock); in init_session()
2247 list_add(&new->se_perclnt, &clp->cl_sessions); in init_session()
2248 spin_unlock(&clp->cl_lock); in init_session()
2251 list_add_tail(&new->se_all_sessions, &nfsd_session_list); in init_session()
2263 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); in init_session()
2264 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); in init_session()
2276 lockdep_assert_held(&nn->client_lock); in __find_in_sessionid_hashtbl()
2281 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { in __find_in_sessionid_hashtbl()
2282 if (!memcmp(elem->se_sessionid.data, sessionid->data, in __find_in_sessionid_hashtbl()
2314 struct nfs4_client *clp = ses->se_client; in unhash_session()
2315 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_session()
2317 lockdep_assert_held(&nn->client_lock); in unhash_session()
2319 list_del(&ses->se_hash); in unhash_session()
2320 spin_lock(&ses->se_client->cl_lock); in unhash_session()
2321 list_del(&ses->se_perclnt); in unhash_session()
2322 spin_unlock(&ses->se_client->cl_lock); in unhash_session()
2324 list_del(&ses->se_all_sessions); in unhash_session()
2337 if (clid->cl_boot == (u32)nn->boot_time) in STALE_CLIENTID()
2349 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients && in alloc_client()
2350 atomic_read(&nn->nfsd_courtesy_clients) > 0) in alloc_client()
2351 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in alloc_client()
2356 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); in alloc_client()
2357 if (clp->cl_name.data == NULL) in alloc_client()
2359 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, in alloc_client()
2362 if (!clp->cl_ownerstr_hashtbl) in alloc_client()
2364 clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0); in alloc_client()
2365 if (!clp->cl_callback_wq) in alloc_client()
2369 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); in alloc_client()
2370 INIT_LIST_HEAD(&clp->cl_sessions); in alloc_client()
2371 idr_init(&clp->cl_stateids); in alloc_client()
2372 atomic_set(&clp->cl_rpc_users, 0); in alloc_client()
2373 clp->cl_cb_state = NFSD4_CB_UNKNOWN; in alloc_client()
2374 clp->cl_state = NFSD4_ACTIVE; in alloc_client()
2375 atomic_inc(&nn->nfs4_client_count); in alloc_client()
2376 atomic_set(&clp->cl_delegs_in_recall, 0); in alloc_client()
2377 INIT_LIST_HEAD(&clp->cl_idhash); in alloc_client()
2378 INIT_LIST_HEAD(&clp->cl_openowners); in alloc_client()
2379 INIT_LIST_HEAD(&clp->cl_delegations); in alloc_client()
2380 INIT_LIST_HEAD(&clp->cl_lru); in alloc_client()
2381 INIT_LIST_HEAD(&clp->cl_revoked); in alloc_client()
2383 INIT_LIST_HEAD(&clp->cl_lo_states); in alloc_client()
2385 INIT_LIST_HEAD(&clp->async_copies); in alloc_client()
2386 spin_lock_init(&clp->async_lock); in alloc_client()
2387 spin_lock_init(&clp->cl_lock); in alloc_client()
2388 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); in alloc_client()
2391 kfree(clp->cl_ownerstr_hashtbl); in alloc_client()
2393 kfree(clp->cl_name.data); in alloc_client()
2404 free_svc_cred(&clp->cl_cred); in __free_client()
2405 destroy_workqueue(clp->cl_callback_wq); in __free_client()
2406 kfree(clp->cl_ownerstr_hashtbl); in __free_client()
2407 kfree(clp->cl_name.data); in __free_client()
2408 kfree(clp->cl_nii_domain.data); in __free_client()
2409 kfree(clp->cl_nii_name.data); in __free_client()
2410 idr_destroy(&clp->cl_stateids); in __free_client()
2411 kfree(clp->cl_ra); in __free_client()
2417 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); in drop_client()
2423 while (!list_empty(&clp->cl_sessions)) { in free_client()
2425 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, in free_client()
2427 list_del(&ses->se_perclnt); in free_client()
2428 WARN_ON_ONCE(atomic_read(&ses->se_ref)); in free_client()
2431 rpc_destroy_wait_queue(&clp->cl_cb_waitq); in free_client()
2432 if (clp->cl_nfsd_dentry) { in free_client()
2433 nfsd_client_rmdir(clp->cl_nfsd_dentry); in free_client()
2434 clp->cl_nfsd_dentry = NULL; in free_client()
2444 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_client_locked()
2447 lockdep_assert_held(&nn->client_lock); in unhash_client_locked()
2450 clp->cl_time = 0; in unhash_client_locked()
2452 if (!list_empty(&clp->cl_idhash)) { in unhash_client_locked()
2453 list_del_init(&clp->cl_idhash); in unhash_client_locked()
2454 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) in unhash_client_locked()
2455 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); in unhash_client_locked()
2457 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); in unhash_client_locked()
2459 list_del_init(&clp->cl_lru); in unhash_client_locked()
2460 spin_lock(&clp->cl_lock); in unhash_client_locked()
2462 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) { in unhash_client_locked()
2463 list_del_init(&ses->se_hash); in unhash_client_locked()
2464 list_del_init(&ses->se_all_sessions); in unhash_client_locked()
2467 spin_unlock(&clp->cl_lock); in unhash_client_locked()
2473 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_client()
2475 spin_lock(&nn->client_lock); in unhash_client()
2477 spin_unlock(&nn->client_lock); in unhash_client()
2482 int users = atomic_read(&clp->cl_rpc_users); in mark_client_expired_locked()
2495 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in __destroy_client()
2502 while (!list_empty(&clp->cl_delegations)) { in __destroy_client()
2503 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); in __destroy_client()
2505 list_add(&dp->dl_recall_lru, &reaplist); in __destroy_client()
2510 list_del_init(&dp->dl_recall_lru); in __destroy_client()
2513 while (!list_empty(&clp->cl_revoked)) { in __destroy_client()
2514 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); in __destroy_client()
2515 list_del_init(&dp->dl_recall_lru); in __destroy_client()
2516 nfs4_put_stid(&dp->dl_stid); in __destroy_client()
2518 while (!list_empty(&clp->cl_openowners)) { in __destroy_client()
2519 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); in __destroy_client()
2520 nfs4_get_stateowner(&oo->oo_owner); in __destroy_client()
2526 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], in __destroy_client()
2529 WARN_ON_ONCE(so->so_is_open_owner); in __destroy_client()
2536 if (clp->cl_cb_conn.cb_xprt) in __destroy_client()
2537 svc_xprt_put(clp->cl_cb_conn.cb_xprt); in __destroy_client()
2538 atomic_add_unless(&nn->nfs4_client_count, -1, 0); in __destroy_client()
2553 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in inc_reclaim_complete()
2555 if (!nn->track_reclaim_completes) in inc_reclaim_complete()
2557 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) in inc_reclaim_complete()
2559 if (atomic_inc_return(&nn->nr_reclaim_complete) == in inc_reclaim_complete()
2560 nn->reclaim_str_hashtbl_size) { in inc_reclaim_complete()
2561 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", in inc_reclaim_complete()
2562 clp->net->ns.inum); in inc_reclaim_complete()
2576 memcpy(target->cl_verifier.data, source->data, in copy_verf()
2577 sizeof(target->cl_verifier.data)); in copy_verf()
2582 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; in copy_clid()
2583 target->cl_clientid.cl_id = source->cl_clientid.cl_id; in copy_clid()
2588 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); in copy_cred()
2589 target->cr_raw_principal = kstrdup(source->cr_raw_principal, in copy_cred()
2591 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); in copy_cred()
2592 if ((source->cr_principal && !target->cr_principal) || in copy_cred()
2593 (source->cr_raw_principal && !target->cr_raw_principal) || in copy_cred()
2594 (source->cr_targ_princ && !target->cr_targ_princ)) in copy_cred()
2595 return -ENOMEM; in copy_cred()
2597 target->cr_flavor = source->cr_flavor; in copy_cred()
2598 target->cr_uid = source->cr_uid; in copy_cred()
2599 target->cr_gid = source->cr_gid; in copy_cred()
2600 target->cr_group_info = source->cr_group_info; in copy_cred()
2601 get_group_info(target->cr_group_info); in copy_cred()
2602 target->cr_gss_mech = source->cr_gss_mech; in copy_cred()
2603 if (source->cr_gss_mech) in copy_cred()
2604 gss_mech_get(source->cr_gss_mech); in copy_cred()
2611 if (o1->len < o2->len) in compare_blob()
2612 return -1; in compare_blob()
2613 if (o1->len > o2->len) in compare_blob()
2615 return memcmp(o1->data, o2->data, o1->len); in compare_blob()
2621 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); in same_verf()
2627 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); in same_clid()
2634 if (g1->ngroups != g2->ngroups) in groups_equal()
2636 for (i=0; i<g1->ngroups; i++) in groups_equal()
2637 if (!gid_eq(g1->gid[i], g2->gid[i])) in groups_equal()
2646 * approximation. We also don't want to allow non-gss use of a client
2654 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); in is_gss_cred()
2662 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) in same_creds()
2663 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) in same_creds()
2664 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) in same_creds()
2667 if (cr1->cr_principal == cr2->cr_principal) in same_creds()
2669 if (!cr1->cr_principal || !cr2->cr_principal) in same_creds()
2671 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); in same_creds()
2676 struct svc_cred *cr = &rqstp->rq_cred; in svc_rqst_integrity_protected()
2679 if (!cr->cr_gss_mech) in svc_rqst_integrity_protected()
2681 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); in svc_rqst_integrity_protected()
2688 struct svc_cred *cr = &rqstp->rq_cred; in nfsd4_mach_creds_match()
2690 if (!cl->cl_mach_cred) in nfsd4_mach_creds_match()
2692 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) in nfsd4_mach_creds_match()
2696 if (cl->cl_cred.cr_raw_principal) in nfsd4_mach_creds_match()
2697 return 0 == strcmp(cl->cl_cred.cr_raw_principal, in nfsd4_mach_creds_match()
2698 cr->cr_raw_principal); in nfsd4_mach_creds_match()
2699 if (!cr->cr_principal) in nfsd4_mach_creds_match()
2701 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); in nfsd4_mach_creds_match()
2709 * This is opaque to client, so no need to byte-swap. Use in gen_confirm()
2713 verf[1] = (__force __be32)nn->clverifier_counter++; in gen_confirm()
2714 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); in gen_confirm()
2719 clp->cl_clientid.cl_boot = (u32)nn->boot_time; in gen_clid()
2720 clp->cl_clientid.cl_id = nn->clientid_counter++; in gen_clid()
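gen_confirm() and gen_clid() above derive fresh confirm verifiers and clientids from per-net counters, and gen_clid() additionally stamps the clientid with the server's boot time; that boot-time stamp is what the STALE_CLIENTID() check earlier in this listing compares against to detect clientids issued by a previous server incarnation. A small sketch of the clientid half (types and storage simplified):

    #include <stdbool.h>
    #include <stdint.h>

    struct clientid { uint32_t cl_boot; uint32_t cl_id; };

    static uint32_t boot_time;           /* recorded once at server startup */
    static uint32_t clientid_counter;

    static struct clientid gen_clid(void)
    {
            struct clientid c = { boot_time, clientid_counter++ };
            return c;
    }

    static bool stale_clientid(const struct clientid *c)
    {
            /* was this clientid handed out by an earlier server instance? */
            return c->cl_boot != boot_time;
    }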
2729 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); in find_stateid_locked()
2730 if (!ret || !ret->sc_type) in find_stateid_locked()
2741 spin_lock(&cl->cl_lock); in find_stateid_by_type()
2744 if ((s->sc_status & ~ok_states) == 0 && in find_stateid_by_type()
2745 (typemask & s->sc_type)) in find_stateid_by_type()
2746 refcount_inc(&s->sc_count); in find_stateid_by_type()
2750 spin_unlock(&cl->cl_lock); in find_stateid_by_type()
2787 struct inode *inode = file_inode(m->file); in client_info_show()
2794 return -ENXIO; in client_info_show()
2795 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); in client_info_show()
2797 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); in client_info_show()
2799 if (clp->cl_state == NFSD4_COURTESY) in client_info_show()
2801 else if (clp->cl_state == NFSD4_EXPIRABLE) in client_info_show()
2803 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) in client_info_show()
2808 ktime_get_boottime_seconds() - clp->cl_time); in client_info_show()
2810 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); in client_info_show()
2811 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); in client_info_show()
2812 if (clp->cl_nii_domain.data) { in client_info_show()
2814 seq_quote_mem(m, clp->cl_nii_domain.data, in client_info_show()
2815 clp->cl_nii_domain.len); in client_info_show()
2817 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); in client_info_show()
2819 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); in client_info_show()
2821 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); in client_info_show()
2822 seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr); in client_info_show()
2823 seq_printf(m, "admin-revoked states: %d\n", in client_info_show()
2824 atomic_read(&clp->cl_admin_revoked)); in client_info_show()
2825 spin_lock(&clp->cl_lock); in client_info_show()
2827 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) in client_info_show()
2828 seq_printf(m, " %u", ses->se_fchannel.maxreqs); in client_info_show()
2830 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) in client_info_show()
2831 seq_printf(m, " %u", ses->se_target_maxslots); in client_info_show()
2832 spin_unlock(&clp->cl_lock); in client_info_show()
2843 __acquires(&clp->cl_lock) in states_start()
2845 struct nfs4_client *clp = s->private; in states_start()
2849 spin_lock(&clp->cl_lock); in states_start()
2850 ret = idr_get_next_ul(&clp->cl_stateids, &id); in states_start()
2857 struct nfs4_client *clp = s->private; in states_next()
2863 ret = idr_get_next_ul(&clp->cl_stateids, &id); in states_next()
2869 __releases(&clp->cl_lock) in states_stop()
2871 struct nfs4_client *clp = s->private; in states_stop()
2873 spin_unlock(&clp->cl_lock); in states_stop()
2878 seq_printf(s, "filename: \"%pD2\"", f->nf_file); in nfs4_show_fname()
2883 struct inode *inode = file_inode(f->nf_file); in nfs4_show_superblock()
2886 MAJOR(inode->i_sb->s_dev), in nfs4_show_superblock()
2887 MINOR(inode->i_sb->s_dev), in nfs4_show_superblock()
2888 inode->i_ino); in nfs4_show_superblock()
2894 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); in nfs4_show_owner()
2899 seq_printf(s, "0x%.8x", stid->si_generation); in nfs4_show_stateid()
2900 seq_printf(s, "%12phN", &stid->si_opaque); in nfs4_show_stateid()
2912 oo = ols->st_stateowner; in nfs4_show_open()
2913 nf = st->sc_file; in nfs4_show_open()
2915 seq_puts(s, "- "); in nfs4_show_open()
2916 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_open()
2919 access = bmap_to_share_mode(ols->st_access_bmap); in nfs4_show_open()
2920 deny = bmap_to_share_mode(ols->st_deny_bmap); in nfs4_show_open()
2923 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", in nfs4_show_open()
2924 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); in nfs4_show_open()
2926 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", in nfs4_show_open()
2927 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); in nfs4_show_open()
2930 spin_lock(&nf->fi_lock); in nfs4_show_open()
2938 spin_unlock(&nf->fi_lock); in nfs4_show_open()
2942 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_open()
2943 seq_puts(s, ", admin-revoked"); in nfs4_show_open()
2956 oo = ols->st_stateowner; in nfs4_show_lock()
2957 nf = st->sc_file; in nfs4_show_lock()
2959 seq_puts(s, "- "); in nfs4_show_lock()
2960 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_lock()
2963 spin_lock(&nf->fi_lock); in nfs4_show_lock()
2980 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_lock()
2981 seq_puts(s, ", admin-revoked"); in nfs4_show_lock()
2983 spin_unlock(&nf->fi_lock); in nfs4_show_lock()
3009 nf = st->sc_file; in nfs4_show_deleg()
3011 seq_puts(s, "- "); in nfs4_show_deleg()
3012 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_deleg()
3015 seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type)); in nfs4_show_deleg()
3019 spin_lock(&nf->fi_lock); in nfs4_show_deleg()
3020 file = nf->fi_deleg_file; in nfs4_show_deleg()
3027 spin_unlock(&nf->fi_lock); in nfs4_show_deleg()
3028 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_deleg()
3029 seq_puts(s, ", admin-revoked"); in nfs4_show_deleg()
3041 seq_puts(s, "- "); in nfs4_show_layout()
3042 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_layout()
3047 spin_lock(&ls->ls_stid.sc_file->fi_lock); in nfs4_show_layout()
3048 file = ls->ls_file; in nfs4_show_layout()
3055 spin_unlock(&ls->ls_stid.sc_file->fi_lock); in nfs4_show_layout()
3056 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_layout()
3057 seq_puts(s, ", admin-revoked"); in nfs4_show_layout()
3067 switch (st->sc_type) { in states_show()
3097 return -ENXIO; in client_states_open()
3102 s = file->private_data; in client_states_open()
3103 s->private = clp; in client_states_open()
3109 struct seq_file *m = file->private_data; in client_opens_release()
3110 struct nfs4_client *clp = m->private; in client_opens_release()
3132 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in force_expire_client()
3135 trace_nfsd_clid_admin_expired(&clp->cl_clientid); in force_expire_client()
3137 spin_lock(&nn->client_lock); in force_expire_client()
3138 clp->cl_time = 0; in force_expire_client()
3139 spin_unlock(&nn->client_lock); in force_expire_client()
3141 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); in force_expire_client()
3142 spin_lock(&nn->client_lock); in force_expire_client()
3143 already_expired = list_empty(&clp->cl_lru); in force_expire_client()
3146 spin_unlock(&nn->client_lock); in force_expire_client()
3151 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); in force_expire_client()
3164 return -EINVAL; in client_ctl_write()
3167 return -ENXIO; in client_ctl_write()
3190 switch (task->tk_status) { in nfsd4_cb_recall_any_done()
3191 case -NFS4ERR_DELAY: in nfsd4_cb_recall_any_done()
3202 struct nfs4_client *clp = cb->cb_clp; in nfsd4_cb_recall_any_release()
3215 trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task); in nfsd4_cb_getattr_done()
3216 ncf->ncf_cb_status = task->tk_status; in nfsd4_cb_getattr_done()
3217 switch (task->tk_status) { in nfsd4_cb_getattr_done()
3218 case -NFS4ERR_DELAY: in nfsd4_cb_getattr_done()
3234 nfs4_put_stid(&dp->dl_stid); in nfsd4_cb_getattr_release()
3254 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ncf->ncf_getattr.cb_flags)) in nfs4_cb_getattr()
3258 ncf->ncf_cb_status = NFS4ERR_IO; in nfs4_cb_getattr()
3261 set_bit(NFSD4_CALLBACK_WAKE, &ncf->ncf_getattr.cb_flags); in nfs4_cb_getattr()
3263 refcount_inc(&dp->dl_stid.sc_count); in nfs4_cb_getattr()
3264 nfsd4_run_cb(&ncf->ncf_getattr); in nfs4_cb_getattr()
3281 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); in create_client()
3287 kref_init(&clp->cl_nfsdfs.cl_ref); in create_client()
3288 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); in create_client()
3289 clp->cl_time = ktime_get_boottime_seconds(); in create_client()
3291 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); in create_client()
3292 clp->cl_cb_session = NULL; in create_client()
3293 clp->net = net; in create_client()
3294 clp->cl_nfsd_dentry = nfsd_client_mkdir( in create_client()
3295 nn, &clp->cl_nfsdfs, in create_client()
3296 clp->cl_clientid.cl_id - nn->clientid_base, in create_client()
3298 clp->cl_nfsd_info_dentry = dentries[0]; in create_client()
3299 if (!clp->cl_nfsd_dentry) { in create_client()
3303 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); in create_client()
3304 if (!clp->cl_ra) { in create_client()
3308 clp->cl_ra_time = 0; in create_client()
3309 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, in create_client()
3317 struct rb_node **new = &(root->rb_node), *parent = NULL; in add_clp_to_name_tree()
3324 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) in add_clp_to_name_tree()
3325 new = &((*new)->rb_left); in add_clp_to_name_tree()
3327 new = &((*new)->rb_right); in add_clp_to_name_tree()
3330 rb_link_node(&new_clp->cl_namenode, parent, new); in add_clp_to_name_tree()
3331 rb_insert_color(&new_clp->cl_namenode, root); in add_clp_to_name_tree()
3338 struct rb_node *node = root->rb_node; in find_clp_in_name_tree()
3343 cmp = compare_blob(&clp->cl_name, name); in find_clp_in_name_tree()
3345 node = node->rb_left; in find_clp_in_name_tree()
3347 node = node->rb_right; in find_clp_in_name_tree()
3358 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in add_to_unconfirmed()
3360 lockdep_assert_held(&nn->client_lock); in add_to_unconfirmed()
3362 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); in add_to_unconfirmed()
3363 add_clp_to_name_tree(clp, &nn->unconf_name_tree); in add_to_unconfirmed()
3364 idhashval = clientid_hashval(clp->cl_clientid.cl_id); in add_to_unconfirmed()
3365 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); in add_to_unconfirmed()
3372 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); in move_to_confirmed()
3373 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in move_to_confirmed()
3375 lockdep_assert_held(&nn->client_lock); in move_to_confirmed()
3377 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); in move_to_confirmed()
3378 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); in move_to_confirmed()
3379 add_clp_to_name_tree(clp, &nn->conf_name_tree); in move_to_confirmed()
3380 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); in move_to_confirmed()
3381 trace_nfsd_clid_confirmed(&clp->cl_clientid); in move_to_confirmed()
3389 unsigned int idhashval = clientid_hashval(clid->cl_id); in find_client_in_id_table()
3392 if (same_clid(&clp->cl_clientid, clid)) { in find_client_in_id_table()
3393 if ((bool)clp->cl_minorversion != sessions) in find_client_in_id_table()
3405 struct list_head *tbl = nn->conf_id_hashtbl; in find_confirmed_client()
3407 lockdep_assert_held(&nn->client_lock); in find_confirmed_client()
3414 struct list_head *tbl = nn->unconf_id_hashtbl; in find_unconfirmed_client()
3416 lockdep_assert_held(&nn->client_lock); in find_unconfirmed_client()
3422 return clp->cl_exchange_flags != 0; in clp_used_exchangeid()
3428 lockdep_assert_held(&nn->client_lock); in find_confirmed_client_by_name()
3429 return find_clp_in_name_tree(name, &nn->conf_name_tree); in find_confirmed_client_by_name()
3435 lockdep_assert_held(&nn->client_lock); in find_unconfirmed_client_by_name()
3436 return find_clp_in_name_tree(name, &nn->unconf_name_tree); in find_unconfirmed_client_by_name()
3442 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; in gen_callback()
3448 if (se->se_callback_netid_len == 3 && in gen_callback()
3449 !memcmp(se->se_callback_netid_val, "tcp", 3)) in gen_callback()
3451 else if (se->se_callback_netid_len == 4 && in gen_callback()
3452 !memcmp(se->se_callback_netid_val, "tcp6", 4)) in gen_callback()
3457 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, in gen_callback()
3458 se->se_callback_addr_len, in gen_callback()
3459 (struct sockaddr *)&conn->cb_addr, in gen_callback()
3460 sizeof(conn->cb_addr)); in gen_callback()
3462 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) in gen_callback()
3465 if (conn->cb_addr.ss_family == AF_INET6) in gen_callback()
3466 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; in gen_callback()
3468 conn->cb_prog = se->se_callback_prog; in gen_callback()
3469 conn->cb_ident = se->se_callback_ident; in gen_callback()
3470 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); in gen_callback()
3474 conn->cb_addr.ss_family = AF_UNSPEC; in gen_callback()
3475 conn->cb_addrlen = 0; in gen_callback()
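gen_callback() above turns the client-supplied callback "universal address" into a socket address via rpc_uaddr2sockaddr(). As a rough illustration of the uaddr format only, here is a standalone userspace sketch (not the kernel helper): an IPv4 uaddr is the dotted address followed by two more dotted octets that encode the port.

	#include <stdio.h>
	#include <string.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	/* Parse "h1.h2.h3.h4.p1.p2", e.g. "192.0.2.1.8.1" -> 192.0.2.1:2049. */
	static int uaddr4_to_sockaddr(const char *uaddr, struct sockaddr_in *sin)
	{
		unsigned int h1, h2, h3, h4, p1, p2;
		char addr[16];

		if (sscanf(uaddr, "%u.%u.%u.%u.%u.%u",
			   &h1, &h2, &h3, &h4, &p1, &p2) != 6)
			return -1;
		if (h1 > 255 || h2 > 255 || h3 > 255 || h4 > 255 ||
		    p1 > 255 || p2 > 255)
			return -1;
		snprintf(addr, sizeof(addr), "%u.%u.%u.%u", h1, h2, h3, h4);
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_port = htons(p1 * 256 + p2);
		return inet_pton(AF_INET, addr, &sin->sin_addr) == 1 ? 0 : -1;
	}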
3486 struct xdr_buf *buf = resp->xdr->buf; in nfsd4_store_cache_entry()
3487 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_store_cache_entry()
3502 if (resp->opcnt == 1 && resp->cstate.status != nfs_ok) in nfsd4_store_cache_entry()
3505 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; in nfsd4_store_cache_entry()
3506 slot->sl_opcnt = resp->opcnt; in nfsd4_store_cache_entry()
3507 slot->sl_status = resp->cstate.status; in nfsd4_store_cache_entry()
3508 free_svc_cred(&slot->sl_cred); in nfsd4_store_cache_entry()
3509 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); in nfsd4_store_cache_entry()
3512 slot->sl_flags &= ~NFSD4_SLOT_CACHED; in nfsd4_store_cache_entry()
3515 slot->sl_flags |= NFSD4_SLOT_CACHED; in nfsd4_store_cache_entry()
3517 base = resp->cstate.data_offset; in nfsd4_store_cache_entry()
3518 slot->sl_datalen = buf->len - base; in nfsd4_store_cache_entry()
3519 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) in nfsd4_store_cache_entry()
3528 * operation which sets resp->p and increments resp->opcnt for
3537 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_enc_sequence_replay()
3540 op = &args->ops[resp->opcnt - 1]; in nfsd4_enc_sequence_replay()
3543 if (slot->sl_flags & NFSD4_SLOT_CACHED) in nfsd4_enc_sequence_replay()
3544 return op->status; in nfsd4_enc_sequence_replay()
3545 if (args->opcnt == 1) { in nfsd4_enc_sequence_replay()
3547 * The original operation wasn't a solo sequence--we in nfsd4_enc_sequence_replay()
3548 * always cache those--so this retry must not match the in nfsd4_enc_sequence_replay()
3551 op->status = nfserr_seq_false_retry; in nfsd4_enc_sequence_replay()
3553 op = &args->ops[resp->opcnt++]; in nfsd4_enc_sequence_replay()
3554 op->status = nfserr_retry_uncached_rep; in nfsd4_enc_sequence_replay()
3557 return op->status; in nfsd4_enc_sequence_replay()
3568 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_replay_cache_entry()
3569 struct xdr_stream *xdr = resp->xdr; in nfsd4_replay_cache_entry()
3573 dprintk("--> %s slot %p\n", __func__, slot); in nfsd4_replay_cache_entry()
3575 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); in nfsd4_replay_cache_entry()
3579 p = xdr_reserve_space(xdr, slot->sl_datalen); in nfsd4_replay_cache_entry()
3584 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); in nfsd4_replay_cache_entry()
3587 resp->opcnt = slot->sl_opcnt; in nfsd4_replay_cache_entry()
3588 return slot->sl_status; in nfsd4_replay_cache_entry()
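nfsd4_store_cache_entry() and nfsd4_replay_cache_entry() implement per-slot reply caching: the encoded reply bytes are copied into the session slot, and a retransmission that matches the slot's seqid gets those bytes played back instead of re-executing the compound. A much-simplified standalone sketch of that idea follows; the names and fixed buffer size are illustrative assumptions, not kernel structures.

	#include <string.h>
	#include <stdint.h>

	struct slot_cache {
		uint32_t seqid;              /* seqid the cached reply answers  */
		size_t   datalen;
		unsigned char data[512];     /* bounded like maxresp_cached     */
		int      cached;             /* analogous to NFSD4_SLOT_CACHED  */
	};

	static void cache_reply(struct slot_cache *s, uint32_t seqid,
				const void *buf, size_t len)
	{
		if (len > sizeof(s->data))
			len = sizeof(s->data);
		memcpy(s->data, buf, len);
		s->datalen = len;
		s->seqid = seqid;
		s->cached = 1;
	}

	/* Returns the number of replayed bytes, or -1 if this is not a replay. */
	static long replay_reply(const struct slot_cache *s, uint32_t seqid,
				 void *buf, size_t buflen)
	{
		if (!s->cached || seqid != s->seqid || buflen < s->datalen)
			return -1;
		memcpy(buf, s->data, s->datalen);
		return (long)s->datalen;
	}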
3598 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; in nfsd4_set_ex_flags()
3600 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; in nfsd4_set_ex_flags()
3604 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; in nfsd4_set_ex_flags()
3607 clid->flags = new->cl_exchange_flags; in nfsd4_set_ex_flags()
3614 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { in client_has_openowners()
3615 if (!list_empty(&oo->oo_owner.so_stateids)) in client_has_openowners()
3625 || !list_empty(&clp->cl_lo_states) in client_has_state()
3627 || !list_empty(&clp->cl_delegations) in client_has_state()
3628 || !list_empty(&clp->cl_sessions) in client_has_state()
3635 if (!exid->nii_domain.data) in copy_impl_id()
3637 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); in copy_impl_id()
3638 if (!clp->cl_nii_domain.data) in copy_impl_id()
3640 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); in copy_impl_id()
3641 if (!clp->cl_nii_name.data) in copy_impl_id()
3643 clp->cl_nii_time = exid->nii_time; in copy_impl_id()
3651 struct nfsd4_exchange_id *exid = &u->exchange_id; in nfsd4_exchange_id()
3656 nfs4_verifier verf = exid->verifier; in nfsd4_exchange_id()
3658 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; in nfsd4_exchange_id()
3664 __func__, rqstp, exid, exid->clname.len, exid->clname.data, in nfsd4_exchange_id()
3665 addr_str, exid->flags, exid->spa_how); in nfsd4_exchange_id()
3667 exid->server_impl_name = kasprintf(GFP_KERNEL, "%s %s %s %s", in nfsd4_exchange_id()
3668 utsname()->sysname, utsname()->release, in nfsd4_exchange_id()
3669 utsname()->version, utsname()->machine); in nfsd4_exchange_id()
3670 if (!exid->server_impl_name) in nfsd4_exchange_id()
3673 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) in nfsd4_exchange_id()
3676 new = create_client(exid->clname, rqstp, &verf); in nfsd4_exchange_id()
3683 switch (exid->spa_how) { in nfsd4_exchange_id()
3685 exid->spo_must_enforce[0] = 0; in nfsd4_exchange_id()
3686 exid->spo_must_enforce[1] = ( in nfsd4_exchange_id()
3687 1 << (OP_BIND_CONN_TO_SESSION - 32) | in nfsd4_exchange_id()
3688 1 << (OP_EXCHANGE_ID - 32) | in nfsd4_exchange_id()
3689 1 << (OP_CREATE_SESSION - 32) | in nfsd4_exchange_id()
3690 1 << (OP_DESTROY_SESSION - 32) | in nfsd4_exchange_id()
3691 1 << (OP_DESTROY_CLIENTID - 32)); in nfsd4_exchange_id()
3693 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | in nfsd4_exchange_id()
3698 exid->spo_must_allow[1] &= ( in nfsd4_exchange_id()
3699 1 << (OP_TEST_STATEID - 32) | in nfsd4_exchange_id()
3700 1 << (OP_FREE_STATEID - 32)); in nfsd4_exchange_id()
3710 if (!new->cl_cred.cr_principal && in nfsd4_exchange_id()
3711 !new->cl_cred.cr_raw_principal) { in nfsd4_exchange_id()
3715 new->cl_mach_cred = true; in nfsd4_exchange_id()
3728 spin_lock(&nn->client_lock); in nfsd4_exchange_id()
3729 conf = find_confirmed_client_by_name(&exid->clname, nn); in nfsd4_exchange_id()
3731 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); in nfsd4_exchange_id()
3732 bool verfs_match = same_verf(&verf, &conf->cl_verifier); in nfsd4_exchange_id()
3752 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; in nfsd4_exchange_id()
3765 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; in nfsd4_exchange_id()
3780 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); in nfsd4_exchange_id()
3792 trace_nfsd_clid_replaced(&conf->cl_clientid); in nfsd4_exchange_id()
3794 new->cl_minorversion = cstate->minorversion; in nfsd4_exchange_id()
3795 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; in nfsd4_exchange_id()
3796 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; in nfsd4_exchange_id()
3799 new->cl_cs_slot.sl_status = nfserr_seq_misordered; in nfsd4_exchange_id()
3804 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; in nfsd4_exchange_id()
3805 exid->clientid.cl_id = conf->cl_clientid.cl_id; in nfsd4_exchange_id()
3807 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; in nfsd4_exchange_id()
3810 exid->nii_domain.len = sizeof("kernel.org") - 1; in nfsd4_exchange_id()
3811 exid->nii_domain.data = "kernel.org"; in nfsd4_exchange_id()
3818 exid->nii_name.len = strlen(exid->server_impl_name); in nfsd4_exchange_id()
3819 if (exid->nii_name.len > NFS4_OPAQUE_LIMIT) in nfsd4_exchange_id()
3820 exid->nii_name.len = NFS4_OPAQUE_LIMIT; in nfsd4_exchange_id()
3821 exid->nii_name.data = exid->server_impl_name; in nfsd4_exchange_id()
3823 /* just send zeros - the date is in nii_name */ in nfsd4_exchange_id()
3824 exid->nii_time.tv_sec = 0; in nfsd4_exchange_id()
3825 exid->nii_time.tv_nsec = 0; in nfsd4_exchange_id()
3828 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); in nfsd4_exchange_id()
3832 spin_unlock(&nn->client_lock); in nfsd4_exchange_id()
3837 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); in nfsd4_exchange_id()
3846 struct nfsd4_exchange_id *exid = &u->exchange_id; in nfsd4_exchange_id_release()
3848 kfree(exid->server_impl_name); in nfsd4_exchange_id_release()
3860 /* Note unsigned 32-bit arithmetic handles wraparound: */ in check_slot_seqid()
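The wraparound note above relies on ordinary unsigned 32-bit arithmetic: a check of the form "seqid == slot_seqid + 1" keeps working when the slot seqid rolls over from 0xffffffff to 0. A tiny standalone check, illustrative only:

	#include <assert.h>
	#include <stdint.h>

	static int is_next_seqid(uint32_t seqid, uint32_t slot_seqid)
	{
		return seqid == (uint32_t)(slot_seqid + 1);
	}

	int main(void)
	{
		assert(is_next_seqid(5, 4));           /* normal increment      */
		assert(is_next_seqid(0, UINT32_MAX));  /* wraps 0xffffffff -> 0 */
		assert(!is_next_seqid(2, 4));          /* misordered            */
		return 0;
	}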
3879 slot->sl_status = nfserr; in nfsd4_cache_create_session()
3880 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); in nfsd4_cache_create_session()
3887 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); in nfsd4_replay_create_session()
3888 return slot->sl_status; in nfsd4_replay_create_session()
3910 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; in check_forechannel_attrs()
3912 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) in check_forechannel_attrs()
3914 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) in check_forechannel_attrs()
3916 ca->headerpadsz = 0; in check_forechannel_attrs()
3917 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); in check_forechannel_attrs()
3918 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); in check_forechannel_attrs()
3919 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); in check_forechannel_attrs()
3920 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, in check_forechannel_attrs()
3922 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); in check_forechannel_attrs()
3928 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3945 ca->headerpadsz = 0; in check_backchannel_attrs()
3947 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) in check_backchannel_attrs()
3949 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) in check_backchannel_attrs()
3951 ca->maxresp_cached = 0; in check_backchannel_attrs()
3952 if (ca->maxops < 2) in check_backchannel_attrs()
3960 switch (cbs->flavor) { in nfsd4_check_cb_sec()
3980 struct nfsd4_create_session *cr_ses = &u->create_session; in nfsd4_create_session()
3990 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) in nfsd4_create_session()
3992 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); in nfsd4_create_session()
3995 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); in nfsd4_create_session()
3998 status = check_backchannel_attrs(&cr_ses->back_channel); in nfsd4_create_session()
4002 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); in nfsd4_create_session()
4009 spin_lock(&nn->client_lock); in nfsd4_create_session()
4011 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */ in nfsd4_create_session()
4012 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); in nfsd4_create_session()
4013 conf = find_confirmed_client(&cr_ses->clientid, true, nn); in nfsd4_create_session()
4021 cs_slot = &conf->cl_cs_slot; in nfsd4_create_session()
4024 cs_slot = &unconf->cl_cs_slot; in nfsd4_create_session()
4027 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); in nfsd4_create_session()
4030 cs_slot->sl_seqid++; in nfsd4_create_session()
4031 cr_ses->seqid = cs_slot->sl_seqid; in nfsd4_create_session()
4050 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || in nfsd4_create_session()
4051 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { in nfsd4_create_session()
4058 old = find_confirmed_client_by_name(&unconf->cl_name, nn); in nfsd4_create_session()
4063 trace_nfsd_clid_replaced(&old->cl_clientid); in nfsd4_create_session()
4072 cr_ses->flags &= ~SESSION4_PERSIST; in nfsd4_create_session()
4074 cr_ses->flags &= ~SESSION4_RDMA; in nfsd4_create_session()
4076 cr_ses->back_channel.maxreqs = new->se_cb_highest_slot + 1; in nfsd4_create_session()
4081 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, in nfsd4_create_session()
4086 spin_unlock(&nn->client_lock); in nfsd4_create_session()
4088 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); in nfsd4_create_session()
4102 cs_slot->sl_seqid--; in nfsd4_create_session()
4103 cr_ses->seqid = cs_slot->sl_seqid; in nfsd4_create_session()
4109 spin_unlock(&nn->client_lock); in nfsd4_create_session()
4135 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; in nfsd4_backchannel_ctl()
4136 struct nfsd4_session *session = cstate->session; in nfsd4_backchannel_ctl()
4140 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); in nfsd4_backchannel_ctl()
4143 spin_lock(&nn->client_lock); in nfsd4_backchannel_ctl()
4144 session->se_cb_prog = bc->bc_cb_program; in nfsd4_backchannel_ctl()
4145 session->se_cb_sec = bc->bc_cb_sec; in nfsd4_backchannel_ctl()
4146 spin_unlock(&nn->client_lock); in nfsd4_backchannel_ctl()
4148 nfsd4_probe_callback(session->se_client); in nfsd4_backchannel_ctl()
4157 list_for_each_entry(c, &s->se_conns, cn_persession) { in __nfsd4_find_conn()
4158 if (c->cn_xprt == xpt) { in __nfsd4_find_conn()
4168 struct nfs4_client *clp = session->se_client; in nfsd4_match_existing_connection()
4169 struct svc_xprt *xpt = rqst->rq_xprt; in nfsd4_match_existing_connection()
4174 spin_lock(&clp->cl_lock); in nfsd4_match_existing_connection()
4178 else if (req == c->cn_flags) in nfsd4_match_existing_connection()
4181 c->cn_flags != NFS4_CDFC4_BACK) in nfsd4_match_existing_connection()
4184 c->cn_flags != NFS4_CDFC4_FORE) in nfsd4_match_existing_connection()
4188 spin_unlock(&clp->cl_lock); in nfsd4_match_existing_connection()
4198 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; in nfsd4_bind_conn_to_session()
4207 spin_lock(&nn->client_lock); in nfsd4_bind_conn_to_session()
4208 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); in nfsd4_bind_conn_to_session()
4209 spin_unlock(&nn->client_lock); in nfsd4_bind_conn_to_session()
4213 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) in nfsd4_bind_conn_to_session()
4216 bcts->dir, &conn); in nfsd4_bind_conn_to_session()
4218 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || in nfsd4_bind_conn_to_session()
4219 bcts->dir == NFS4_CDFC4_BACK) in nfsd4_bind_conn_to_session()
4220 conn->cn_flags |= NFS4_CDFC4_BACK; in nfsd4_bind_conn_to_session()
4221 nfsd4_probe_callback(session->se_client); in nfsd4_bind_conn_to_session()
4226 status = nfsd4_map_bcts_dir(&bcts->dir); in nfsd4_bind_conn_to_session()
4229 conn = alloc_conn(rqstp, bcts->dir); in nfsd4_bind_conn_to_session()
4243 if (!cstate->session) in nfsd4_compound_in_session()
4245 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); in nfsd4_compound_in_session()
4252 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; in nfsd4_destroy_session()
4266 spin_lock(&nn->client_lock); in nfsd4_destroy_session()
4271 if (!nfsd4_mach_creds_match(ses->se_client, r)) in nfsd4_destroy_session()
4277 spin_unlock(&nn->client_lock); in nfsd4_destroy_session()
4279 nfsd4_probe_callback_sync(ses->se_client); in nfsd4_destroy_session()
4281 spin_lock(&nn->client_lock); in nfsd4_destroy_session()
4286 spin_unlock(&nn->client_lock); in nfsd4_destroy_session()
4293 struct nfs4_client *clp = ses->se_client; in nfsd4_sequence_check_conn()
4298 spin_lock(&clp->cl_lock); in nfsd4_sequence_check_conn()
4299 c = __nfsd4_find_conn(new->cn_xprt, ses); in nfsd4_sequence_check_conn()
4303 if (clp->cl_mach_cred) in nfsd4_sequence_check_conn()
4306 spin_unlock(&clp->cl_lock); in nfsd4_sequence_check_conn()
4310 nfsd4_conn_lost(&new->cn_xpt_user); in nfsd4_sequence_check_conn()
4313 spin_unlock(&clp->cl_lock); in nfsd4_sequence_check_conn()
4320 struct nfsd4_compoundargs *args = rqstp->rq_argp; in nfsd4_session_too_many_ops()
4322 return args->opcnt > session->se_fchannel.maxops; in nfsd4_session_too_many_ops()
4328 struct xdr_buf *xb = &rqstp->rq_arg; in nfsd4_request_too_big()
4330 return xb->len > session->se_fchannel.maxreq_sz; in nfsd4_request_too_big()
4336 struct nfsd4_compoundargs *argp = rqstp->rq_argp; in replay_matches_cache()
4338 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != in replay_matches_cache()
4339 (bool)seq->cachethis) in replay_matches_cache()
4345 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) in replay_matches_cache()
4352 if (slot->sl_opcnt > argp->opcnt) in replay_matches_cache()
4355 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) in replay_matches_cache()
4374 struct nfs4_client *clp = session->se_client; in nfsd4_construct_sequence_response()
4376 seq->maxslots_response = max(session->se_target_maxslots, in nfsd4_construct_sequence_response()
4377 seq->maxslots); in nfsd4_construct_sequence_response()
4378 seq->target_maxslots = session->se_target_maxslots; in nfsd4_construct_sequence_response()
4380 switch (clp->cl_cb_state) { in nfsd4_construct_sequence_response()
4382 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; in nfsd4_construct_sequence_response()
4385 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; in nfsd4_construct_sequence_response()
4388 seq->status_flags = 0; in nfsd4_construct_sequence_response()
4390 if (!list_empty(&clp->cl_revoked)) in nfsd4_construct_sequence_response()
4391 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; in nfsd4_construct_sequence_response()
4392 if (atomic_read(&clp->cl_admin_revoked)) in nfsd4_construct_sequence_response()
4393 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED; in nfsd4_construct_sequence_response()
4400 struct nfsd4_sequence *seq = &u->sequence; in nfsd4_sequence()
4401 struct nfsd4_compoundres *resp = rqstp->rq_resp; in nfsd4_sequence()
4402 struct xdr_stream *xdr = resp->xdr; in nfsd4_sequence()
4412 if (resp->opcnt != 1) in nfsd4_sequence()
4423 spin_lock(&nn->client_lock); in nfsd4_sequence()
4424 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); in nfsd4_sequence()
4427 clp = session->se_client; in nfsd4_sequence()
4438 if (seq->slotid >= session->se_fchannel.maxreqs) in nfsd4_sequence()
4441 slot = xa_load(&session->se_slots, seq->slotid); in nfsd4_sequence()
4442 dprintk("%s: slotid %d\n", __func__, seq->slotid); in nfsd4_sequence()
4448 status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags); in nfsd4_sequence()
4451 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) in nfsd4_sequence()
4456 cstate->slot = slot; in nfsd4_sequence()
4457 cstate->session = session; in nfsd4_sequence()
4458 cstate->clp = clp; in nfsd4_sequence()
4459 /* Return the cached reply status and set cstate->status in nfsd4_sequence()
4462 cstate->status = nfserr_replay_cache; in nfsd4_sequence()
4473 if (session->se_target_maxslots < session->se_fchannel.maxreqs && in nfsd4_sequence()
4474 slot->sl_generation == session->se_slot_gen && in nfsd4_sequence()
4475 seq->maxslots <= session->se_target_maxslots) in nfsd4_sequence()
4477 free_session_slots(session, session->se_target_maxslots); in nfsd4_sequence()
4479 buflen = (seq->cachethis) ? in nfsd4_sequence()
4480 session->se_fchannel.maxresp_cached : in nfsd4_sequence()
4481 session->se_fchannel.maxresp_sz; in nfsd4_sequence()
4482 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : in nfsd4_sequence()
4484 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) in nfsd4_sequence()
4490 slot->sl_seqid = seq->seqid; in nfsd4_sequence()
4491 slot->sl_flags &= ~NFSD4_SLOT_REUSED; in nfsd4_sequence()
4492 slot->sl_flags |= NFSD4_SLOT_INUSE; in nfsd4_sequence()
4493 slot->sl_generation = session->se_slot_gen; in nfsd4_sequence()
4494 if (seq->cachethis) in nfsd4_sequence()
4495 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; in nfsd4_sequence()
4497 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; in nfsd4_sequence()
4499 cstate->slot = slot; in nfsd4_sequence()
4500 cstate->session = session; in nfsd4_sequence()
4501 cstate->clp = clp; in nfsd4_sequence()
4506 * fairly quick growth without grossly over-shooting what in nfsd4_sequence()
4509 if (seq->slotid == session->se_fchannel.maxreqs - 1 && in nfsd4_sequence()
4510 session->se_target_maxslots >= session->se_fchannel.maxreqs && in nfsd4_sequence()
4511 session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) { in nfsd4_sequence()
4512 int s = session->se_fchannel.maxreqs; in nfsd4_sequence()
4522 slot = nfsd4_alloc_slot(&session->se_fchannel, s, in nfsd4_sequence()
4524 prev_slot = xa_load(&session->se_slots, s); in nfsd4_sequence()
4526 slot->sl_seqid = xa_to_value(prev_slot); in nfsd4_sequence()
4527 slot->sl_flags |= NFSD4_SLOT_REUSED; in nfsd4_sequence()
4530 !xa_is_err(xa_store(&session->se_slots, s, slot, in nfsd4_sequence()
4533 session->se_fchannel.maxreqs = s; in nfsd4_sequence()
4534 atomic_add(s - session->se_target_maxslots, in nfsd4_sequence()
4536 session->se_target_maxslots = s; in nfsd4_sequence()
4541 } while (slot && --cnt > 0); in nfsd4_sequence()
4549 spin_unlock(&nn->client_lock); in nfsd4_sequence()
4559 struct nfsd4_compound_state *cs = &resp->cstate; in nfsd4_sequence_done()
4562 if (cs->status != nfserr_replay_cache) { in nfsd4_sequence_done()
4564 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; in nfsd4_sequence_done()
4567 nfsd4_put_session(cs->session); in nfsd4_sequence_done()
4568 } else if (cs->clp) in nfsd4_sequence_done()
4569 put_client_renew(cs->clp); in nfsd4_sequence_done()
4577 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; in nfsd4_destroy_clientid()
4583 spin_lock(&nn->client_lock); in nfsd4_destroy_clientid()
4584 unconf = find_unconfirmed_client(&dc->clientid, true, nn); in nfsd4_destroy_clientid()
4585 conf = find_confirmed_client(&dc->clientid, true, nn); in nfsd4_destroy_clientid()
4608 trace_nfsd_clid_destroyed(&clp->cl_clientid); in nfsd4_destroy_clientid()
4611 spin_unlock(&nn->client_lock); in nfsd4_destroy_clientid()
4621 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; in nfsd4_reclaim_complete()
4622 struct nfs4_client *clp = cstate->clp; in nfsd4_reclaim_complete()
4625 if (rc->rca_one_fs) { in nfsd4_reclaim_complete()
4626 if (!cstate->current_fh.fh_dentry) in nfsd4_reclaim_complete()
4636 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) in nfsd4_reclaim_complete()
4651 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); in nfsd4_reclaim_complete()
4662 struct nfsd4_setclientid *setclid = &u->setclientid; in nfsd4_setclientid()
4663 struct xdr_netobj clname = setclid->se_name; in nfsd4_setclientid()
4664 nfs4_verifier clverifier = setclid->se_verf; in nfsd4_setclientid()
4673 spin_lock(&nn->client_lock); in nfsd4_setclientid()
4679 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid()
4688 if (same_verf(&conf->cl_verifier, &clverifier)) { in nfsd4_setclientid()
4696 new->cl_minorversion = 0; in nfsd4_setclientid()
4699 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; in nfsd4_setclientid()
4700 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; in nfsd4_setclientid()
4701 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); in nfsd4_setclientid()
4705 spin_unlock(&nn->client_lock); in nfsd4_setclientid()
4709 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); in nfsd4_setclientid()
4721 &u->setclientid_confirm; in nfsd4_setclientid_confirm()
4724 nfs4_verifier confirm = setclientid_confirm->sc_confirm; in nfsd4_setclientid_confirm()
4725 clientid_t * clid = &setclientid_confirm->sc_clientid; in nfsd4_setclientid_confirm()
4732 spin_lock(&nn->client_lock); in nfsd4_setclientid_confirm()
4743 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid_confirm()
4747 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid_confirm()
4751 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { in nfsd4_setclientid_confirm()
4752 if (conf && same_verf(&confirm, &conf->cl_confirm)) { in nfsd4_setclientid_confirm()
4763 nfsd4_change_callback(conf, &unconf->cl_cb_conn); in nfsd4_setclientid_confirm()
4770 old = find_confirmed_client_by_name(&unconf->cl_name, nn); in nfsd4_setclientid_confirm()
4774 && !same_creds(&unconf->cl_cred, in nfsd4_setclientid_confirm()
4775 &old->cl_cred)) { in nfsd4_setclientid_confirm()
4784 trace_nfsd_clid_replaced(&old->cl_clientid); in nfsd4_setclientid_confirm()
4794 spin_unlock(&nn->client_lock); in nfsd4_setclientid_confirm()
4796 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); in nfsd4_setclientid_confirm()
4798 spin_lock(&nn->client_lock); in nfsd4_setclientid_confirm()
4801 spin_unlock(&nn->client_lock); in nfsd4_setclientid_confirm()
4816 refcount_set(&fp->fi_ref, 1); in nfsd4_file_init()
4817 spin_lock_init(&fp->fi_lock); in nfsd4_file_init()
4818 INIT_LIST_HEAD(&fp->fi_stateids); in nfsd4_file_init()
4819 INIT_LIST_HEAD(&fp->fi_delegations); in nfsd4_file_init()
4820 INIT_LIST_HEAD(&fp->fi_clnt_odstate); in nfsd4_file_init()
4821 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); in nfsd4_file_init()
4822 fp->fi_deleg_file = NULL; in nfsd4_file_init()
4823 fp->fi_rdeleg_file = NULL; in nfsd4_file_init()
4824 fp->fi_had_conflict = false; in nfsd4_file_init()
4825 fp->fi_share_deny = 0; in nfsd4_file_init()
4826 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); in nfsd4_file_init()
4827 memset(fp->fi_access, 0, sizeof(fp->fi_access)); in nfsd4_file_init()
4828 fp->fi_aliased = false; in nfsd4_file_init()
4829 fp->fi_inode = d_inode(fh->fh_dentry); in nfsd4_file_init()
4831 INIT_LIST_HEAD(&fp->fi_lo_states); in nfsd4_file_init()
4832 atomic_set(&fp->fi_lo_recalls, 0); in nfsd4_file_init()
4887 return -ENOMEM; in nfsd4_init_slabs()
4893 struct nfsd_net *nn = shrink->private_data; in nfsd4_state_shrinker_count()
4896 count = atomic_read(&nn->nfsd_courtesy_clients); in nfsd4_state_shrinker_count()
4900 queue_work(laundry_wq, &nn->nfsd_shrinker_work); in nfsd4_state_shrinker_count()
4916 nn->nfsd4_lease = 90; /* default lease time */ in nfsd4_init_leases_net()
4917 nn->nfsd4_grace = 90; in nfsd4_init_leases_net()
4918 nn->somebody_reclaimed = false; in nfsd4_init_leases_net()
4919 nn->track_reclaim_completes = false; in nfsd4_init_leases_net()
4920 nn->clverifier_counter = get_random_u32(); in nfsd4_init_leases_net()
4921 nn->clientid_base = get_random_u32(); in nfsd4_init_leases_net()
4922 nn->clientid_counter = nn->clientid_base + 1; in nfsd4_init_leases_net()
4923 nn->s2s_cp_cl_id = nn->clientid_counter++; in nfsd4_init_leases_net()
4925 atomic_set(&nn->nfs4_client_count, 0); in nfsd4_init_leases_net()
4929 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); in nfsd4_init_leases_net()
4931 atomic_set(&nn->nfsd_courtesy_clients, 0); in nfsd4_init_leases_net()
4942 rp->rp_status = nfserr_serverfault; in init_nfs4_replay()
4943 rp->rp_buflen = 0; in init_nfs4_replay()
4944 rp->rp_buf = rp->rp_ibuf; in init_nfs4_replay()
4945 rp->rp_locked = RP_UNLOCKED; in init_nfs4_replay()
4952 wait_var_event(&so->so_replay.rp_locked, in nfsd4_cstate_assign_replay()
4953 cmpxchg(&so->so_replay.rp_locked, in nfsd4_cstate_assign_replay()
4955 if (so->so_replay.rp_locked == RP_UNHASHED) in nfsd4_cstate_assign_replay()
4956 return -EAGAIN; in nfsd4_cstate_assign_replay()
4957 cstate->replay_owner = nfs4_get_stateowner(so); in nfsd4_cstate_assign_replay()
4964 struct nfs4_stateowner *so = cstate->replay_owner; in nfsd4_cstate_clear_replay()
4967 cstate->replay_owner = NULL; in nfsd4_cstate_clear_replay()
4968 store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED); in nfsd4_cstate_clear_replay()
4981 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); in alloc_stateowner()
4982 if (!sop->so_owner.data) { in alloc_stateowner()
4987 INIT_LIST_HEAD(&sop->so_stateids); in alloc_stateowner()
4988 sop->so_client = clp; in alloc_stateowner()
4989 init_nfs4_replay(&sop->so_replay); in alloc_stateowner()
4990 atomic_set(&sop->so_count, 1); in alloc_stateowner()
4996 lockdep_assert_held(&clp->cl_lock); in hash_openowner()
4998 list_add(&oo->oo_owner.so_strhash, in hash_openowner()
4999 &clp->cl_ownerstr_hashtbl[strhashval]); in hash_openowner()
5000 list_add(&oo->oo_perclient, &clp->cl_openowners); in hash_openowner()
5024 struct nfs4_openowner *oo = open->op_openowner; in nfsd4_find_existing_open()
5026 lockdep_assert_held(&fp->fi_lock); in nfsd4_find_existing_open()
5028 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { in nfsd4_find_existing_open()
5030 if (local->st_stateowner->so_is_open_owner == 0) in nfsd4_find_existing_open()
5032 if (local->st_stateowner != &oo->oo_owner) in nfsd4_find_existing_open()
5034 if (local->st_stid.sc_type == SC_TYPE_OPEN && in nfsd4_find_existing_open()
5035 !local->st_stid.sc_status) { in nfsd4_find_existing_open()
5037 refcount_inc(&ret->st_stid.sc_count); in nfsd4_find_existing_open()
5045 __releases(&s->sc_client->cl_lock) in nfsd4_drop_revoked_stid()
5047 struct nfs4_client *cl = s->sc_client; in nfsd4_drop_revoked_stid()
5053 switch (s->sc_type) { in nfsd4_drop_revoked_stid()
5058 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5064 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5070 list_del_init(&dp->dl_recall_lru); in nfsd4_drop_revoked_stid()
5071 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5075 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5083 * that it can forget an admin-revoked stateid. in nfsd40_drop_revoked_stid()
5090 if (cl->cl_minorversion == 0) { in nfsd40_drop_revoked_stid()
5093 spin_lock(&cl->cl_lock); in nfsd40_drop_revoked_stid()
5098 spin_unlock(&cl->cl_lock); in nfsd40_drop_revoked_stid()
5107 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) in nfsd4_verify_open_stid()
5109 else if (s->sc_status & SC_STATUS_REVOKED) in nfsd4_verify_open_stid()
5111 else if (s->sc_status & SC_STATUS_CLOSED) in nfsd4_verify_open_stid()
5122 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); in nfsd4_lock_ol_stateid()
5123 ret = nfsd4_verify_open_stid(&stp->st_stid); in nfsd4_lock_ol_stateid()
5125 nfsd40_drop_revoked_stid(stp->st_stid.sc_client, in nfsd4_lock_ol_stateid()
5126 &stp->st_stid.sc_stateid); in nfsd4_lock_ol_stateid()
5129 mutex_unlock(&stp->st_mutex); in nfsd4_lock_ol_stateid()
5138 spin_lock(&fp->fi_lock); in nfsd4_find_and_lock_existing_open()
5140 spin_unlock(&fp->fi_lock); in nfsd4_find_and_lock_existing_open()
5143 nfs4_put_stid(&stp->st_stid); in nfsd4_find_and_lock_existing_open()
5152 struct nfs4_client *clp = cstate->clp; in find_or_alloc_open_stateowner()
5156 spin_lock(&clp->cl_lock); in find_or_alloc_open_stateowner()
5160 spin_unlock(&clp->cl_lock); in find_or_alloc_open_stateowner()
5163 spin_unlock(&clp->cl_lock); in find_or_alloc_open_stateowner()
5165 if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) { in find_or_alloc_open_stateowner()
5172 nfs4_free_stateowner(&new->oo_owner); in find_or_alloc_open_stateowner()
5176 new = alloc_stateowner(openowner_slab, &open->op_owner, clp); in find_or_alloc_open_stateowner()
5179 new->oo_owner.so_ops = &openowner_ops; in find_or_alloc_open_stateowner()
5180 new->oo_owner.so_is_open_owner = 1; in find_or_alloc_open_stateowner()
5181 new->oo_owner.so_seqid = open->op_seqid; in find_or_alloc_open_stateowner()
5182 new->oo_flags = 0; in find_or_alloc_open_stateowner()
5184 new->oo_flags |= NFS4_OO_CONFIRMED; in find_or_alloc_open_stateowner()
5185 new->oo_time = 0; in find_or_alloc_open_stateowner()
5186 new->oo_last_closed_stid = NULL; in find_or_alloc_open_stateowner()
5187 INIT_LIST_HEAD(&new->oo_close_lru); in find_or_alloc_open_stateowner()
5195 struct nfs4_openowner *oo = open->op_openowner; in init_open_stateid()
5199 stp = open->op_stp; in init_open_stateid()
5201 mutex_init(&stp->st_mutex); in init_open_stateid()
5202 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); in init_open_stateid()
5205 spin_lock(&oo->oo_owner.so_client->cl_lock); in init_open_stateid()
5206 spin_lock(&fp->fi_lock); in init_open_stateid()
5209 mutex_unlock(&stp->st_mutex); in init_open_stateid()
5218 open->op_stp = NULL; in init_open_stateid()
5219 refcount_inc(&stp->st_stid.sc_count); in init_open_stateid()
5220 stp->st_stid.sc_type = SC_TYPE_OPEN; in init_open_stateid()
5221 INIT_LIST_HEAD(&stp->st_locks); in init_open_stateid()
5222 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); in init_open_stateid()
5224 stp->st_stid.sc_file = fp; in init_open_stateid()
5225 stp->st_access_bmap = 0; in init_open_stateid()
5226 stp->st_deny_bmap = 0; in init_open_stateid()
5227 stp->st_openstp = NULL; in init_open_stateid()
5228 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); in init_open_stateid()
5229 list_add(&stp->st_perfile, &fp->fi_stateids); in init_open_stateid()
5232 spin_unlock(&fp->fi_lock); in init_open_stateid()
5233 spin_unlock(&oo->oo_owner.so_client->cl_lock); in init_open_stateid()
5237 nfs4_put_stid(&retstp->st_stid); in init_open_stateid()
5241 mutex_unlock(&stp->st_mutex); in init_open_stateid()
5256 struct nfs4_openowner *oo = openowner(s->st_stateowner); in move_to_close_lru()
5257 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, in move_to_close_lru()
5273 store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED); in move_to_close_lru()
5274 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); in move_to_close_lru()
5277 if (s->st_stid.sc_file) { in move_to_close_lru()
5278 put_nfs4_file(s->st_stid.sc_file); in move_to_close_lru()
5279 s->st_stid.sc_file = NULL; in move_to_close_lru()
5282 spin_lock(&nn->client_lock); in move_to_close_lru()
5283 last = oo->oo_last_closed_stid; in move_to_close_lru()
5284 oo->oo_last_closed_stid = s; in move_to_close_lru()
5285 list_move_tail(&oo->oo_close_lru, &nn->close_lru); in move_to_close_lru()
5286 oo->oo_time = ktime_get_boottime_seconds(); in move_to_close_lru()
5287 spin_unlock(&nn->client_lock); in move_to_close_lru()
5289 nfs4_put_stid(&last->st_stid); in move_to_close_lru()
5295 struct inode *inode = d_inode(fhp->fh_dentry); in nfsd4_file_hash_lookup()
5303 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { in nfsd4_file_hash_lookup()
5304 if (refcount_inc_not_zero(&fi->fi_ref)) { in nfsd4_file_hash_lookup()
5316 * distinct filehandles. They will all be on the list returned
5319 * inode->i_lock prevents racing insertions from adding an entry
5325 struct inode *inode = d_inode(fhp->fh_dentry); in nfsd4_file_hash_insert()
5333 spin_lock(&inode->i_lock); in nfsd4_file_hash_insert()
5338 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { in nfsd4_file_hash_insert()
5339 if (refcount_inc_not_zero(&fi->fi_ref)) in nfsd4_file_hash_insert()
5342 fi->fi_aliased = alias_found = true; in nfsd4_file_hash_insert()
5348 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, in nfsd4_file_hash_insert()
5353 new->fi_aliased = alias_found; in nfsd4_file_hash_insert()
5357 spin_unlock(&inode->i_lock); in nfsd4_file_hash_insert()
5364 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, in nfsd4_file_hash_remove()
5369 * Called to check deny when READ with all zero stateid or
5370 * WRITE with all zero or all one stateid
5383 spin_lock(&fp->fi_lock); in nfs4_share_conflict()
5384 if (fp->fi_share_deny & deny_type) in nfs4_share_conflict()
5386 spin_unlock(&fp->fi_lock); in nfs4_share_conflict()
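nfs4_share_conflict() above rejects an anonymous-stateid READ or WRITE when it hits an existing deny bit in fi_share_deny. The general NFSv4 share-reservation rule it reflects: a new open conflicts if its requested access intersects an existing deny, or its requested deny intersects existing access. A minimal sketch of that rule, with illustrative constants rather than the kernel's:

	#define SHARE_ACCESS_READ   0x1
	#define SHARE_ACCESS_WRITE  0x2
	#define SHARE_DENY_READ     0x1
	#define SHARE_DENY_WRITE    0x2

	static int share_conflict(unsigned int cur_access, unsigned int cur_deny,
				  unsigned int new_access, unsigned int new_deny)
	{
		return (new_access & cur_deny) || (new_deny & cur_access);
	}

For example, a WRITE against state that set SHARE_DENY_WRITE conflicts, which is the fi_share_deny & deny_type test seen above.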
5395 return ctx && !list_empty_careful(&ctx->flc_lease); in nfsd4_deleg_present()
5399 * nfsd_wait_for_delegreturn - wait for delegations to be returned
5401 * @inode: in-core inode of the file being waited for
5403 * The timeout prevents deadlock if all nfsd threads happen to be
5423 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, in nfsd4_cb_recall_prepare()
5426 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); in nfsd4_cb_recall_prepare()
5430 * already holding inode->i_lock. in nfsd4_cb_recall_prepare()
5436 if (delegation_hashed(dp) && dp->dl_time == 0) { in nfsd4_cb_recall_prepare()
5437 dp->dl_time = ktime_get_boottime_seconds(); in nfsd4_cb_recall_prepare()
5438 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); in nfsd4_cb_recall_prepare()
5448 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); in nfsd4_cb_recall_done()
5450 if (dp->dl_stid.sc_status) in nfsd4_cb_recall_done()
5454 switch (task->tk_status) { in nfsd4_cb_recall_done()
5457 case -NFS4ERR_DELAY: in nfsd4_cb_recall_done()
5460 case -EBADHANDLE: in nfsd4_cb_recall_done()
5461 case -NFS4ERR_BAD_STATEID: in nfsd4_cb_recall_done()
5466 if (dp->dl_retries--) { in nfsd4_cb_recall_done()
5480 nfs4_put_stid(&dp->dl_stid); in nfsd4_cb_recall_release()
5494 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &dp->dl_recall.cb_flags)) in nfsd_break_one_deleg()
5504 refcount_inc(&dp->dl_stid.sc_count); in nfsd_break_one_deleg()
5505 queued = nfsd4_run_cb(&dp->dl_recall); in nfsd_break_one_deleg()
5508 refcount_dec(&dp->dl_stid.sc_count); in nfsd_break_one_deleg()
5515 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner; in nfsd_break_deleg_cb()
5516 struct nfs4_file *fp = dp->dl_stid.sc_file; in nfsd_break_deleg_cb()
5517 struct nfs4_client *clp = dp->dl_stid.sc_client; in nfsd_break_deleg_cb()
5520 trace_nfsd_cb_recall(&dp->dl_stid); in nfsd_break_deleg_cb()
5522 dp->dl_recalled = true; in nfsd_break_deleg_cb()
5523 atomic_inc(&clp->cl_delegs_in_recall); in nfsd_break_deleg_cb()
5525 nn = net_generic(clp->net, nfsd_net_id); in nfsd_break_deleg_cb()
5526 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfsd_break_deleg_cb()
5534 fl->fl_break_time = 0; in nfsd_break_deleg_cb()
5536 fp->fi_had_conflict = true; in nfsd_break_deleg_cb()
5542 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
5551 struct nfs4_delegation *dl = fl->c.flc_owner; in nfsd_breaker_owns_lease()
5558 clp = *(rqst->rq_lease_breaker); in nfsd_breaker_owns_lease()
5559 return dl->dl_stid.sc_client == clp; in nfsd_breaker_owns_lease()
5566 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner; in nfsd_change_deleg_cb()
5567 struct nfs4_client *clp = dp->dl_stid.sc_client; in nfsd_change_deleg_cb()
5570 if (dp->dl_recalled) in nfsd_change_deleg_cb()
5571 atomic_dec(&clp->cl_delegs_in_recall); in nfsd_change_deleg_cb()
5574 return -EAGAIN; in nfsd_change_deleg_cb()
5587 if (seqid == so->so_seqid - 1) in nfsd4_check_seqid()
5589 if (seqid == so->so_seqid) in nfsd4_check_seqid()
5599 spin_lock(&nn->client_lock); in lookup_clientid()
5602 atomic_inc(&found->cl_rpc_users); in lookup_clientid()
5603 spin_unlock(&nn->client_lock); in lookup_clientid()
5611 if (cstate->clp) { in set_client()
5612 if (!same_clid(&cstate->clp->cl_clientid, clid)) in set_client()
5620 * set cstate->clp), so session = false: in set_client()
5622 cstate->clp = lookup_clientid(clid, false, nn); in set_client()
5623 if (!cstate->clp) in set_client()
5632 clientid_t *clientid = &open->op_clientid; in nfsd4_process_open1()
5642 open->op_file = nfsd4_alloc_file(); in nfsd4_process_open1()
5643 if (open->op_file == NULL) in nfsd4_process_open1()
5649 clp = cstate->clp; in nfsd4_process_open1()
5651 strhashval = ownerstr_hashval(&open->op_owner); in nfsd4_process_open1()
5654 open->op_openowner = oo; in nfsd4_process_open1()
5657 if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) { in nfsd4_process_open1()
5658 nfs4_put_stateowner(&oo->oo_owner); in nfsd4_process_open1()
5661 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); in nfsd4_process_open1()
5665 open->op_stp = nfs4_alloc_open_stateid(clp); in nfsd4_process_open1()
5666 if (!open->op_stp) in nfsd4_process_open1()
5670 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { in nfsd4_process_open1()
5671 open->op_odstate = alloc_clnt_odstate(clp); in nfsd4_process_open1()
5672 if (!open->op_odstate) in nfsd4_process_open1()
5682 if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type)) in nfs4_check_delegmode()
5706 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || in nfsd4_is_deleg_cur()
5707 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; in nfsd4_is_deleg_cur()
5718 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); in nfs4_check_deleg()
5721 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) { in nfs4_check_deleg()
5722 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5726 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) { in nfs4_check_deleg()
5727 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5728 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid); in nfs4_check_deleg()
5732 flags = share_access_to_flags(open->op_share_access); in nfs4_check_deleg()
5735 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5744 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; in nfs4_check_deleg()
5770 if (!open->op_truncate) in nfsd4_truncate()
5772 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) in nfsd4_truncate()
5783 int oflag = nfs4_access_to_omode(open->op_share_access); in nfs4_get_vfs_file()
5784 int access = nfs4_access_to_access(open->op_share_access); in nfs4_get_vfs_file()
5787 spin_lock(&fp->fi_lock); in nfs4_get_vfs_file()
5793 status = nfs4_file_check_deny(fp, open->op_share_deny); in nfs4_get_vfs_file()
5796 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5800 stp, open->op_share_deny, false)) in nfs4_get_vfs_file()
5802 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5807 status = nfs4_file_get_access(fp, open->op_share_access); in nfs4_get_vfs_file()
5810 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5814 stp, open->op_share_access, true)) in nfs4_get_vfs_file()
5816 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5821 old_access_bmap = stp->st_access_bmap; in nfs4_get_vfs_file()
5822 set_access(open->op_share_access, stp); in nfs4_get_vfs_file()
5825 old_deny_bmap = stp->st_deny_bmap; in nfs4_get_vfs_file()
5826 set_deny(open->op_share_deny, stp); in nfs4_get_vfs_file()
5827 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); in nfs4_get_vfs_file()
5829 if (!fp->fi_fds[oflag]) { in nfs4_get_vfs_file()
5830 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5833 open->op_filp, &nf); in nfs4_get_vfs_file()
5837 spin_lock(&fp->fi_lock); in nfs4_get_vfs_file()
5838 if (!fp->fi_fds[oflag]) { in nfs4_get_vfs_file()
5839 fp->fi_fds[oflag] = nf; in nfs4_get_vfs_file()
5843 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5847 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, in nfs4_get_vfs_file()
5858 stp->st_access_bmap = old_access_bmap; in nfs4_get_vfs_file()
5859 nfs4_file_put_access(fp, open->op_share_access); in nfs4_get_vfs_file()
5870 unsigned char old_deny_bmap = stp->st_deny_bmap; in nfs4_upgrade_open()
5872 if (!test_access(open->op_share_access, stp)) in nfs4_upgrade_open()
5876 spin_lock(&fp->fi_lock); in nfs4_upgrade_open()
5877 status = nfs4_file_check_deny(fp, open->op_share_deny); in nfs4_upgrade_open()
5880 set_deny(open->op_share_deny, stp); in nfs4_upgrade_open()
5881 fp->fi_share_deny |= in nfs4_upgrade_open()
5882 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); in nfs4_upgrade_open()
5886 stp, open->op_share_deny, false)) in nfs4_upgrade_open()
5890 spin_unlock(&fp->fi_lock); in nfs4_upgrade_open()
5904 if (clp->cl_cb_state == NFSD4_CB_UP) in nfsd4_cb_channel_good()
5911 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; in nfsd4_cb_channel_good()
5921 fl->fl_lmops = &nfsd_lease_mng_ops; in nfs4_alloc_init_lease()
5922 fl->c.flc_flags = FL_DELEG; in nfs4_alloc_init_lease()
5923 fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK; in nfs4_alloc_init_lease()
5924 fl->c.flc_owner = (fl_owner_t)dp; in nfs4_alloc_init_lease()
5925 fl->c.flc_pid = current->tgid; in nfs4_alloc_init_lease()
5926 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; in nfs4_alloc_init_lease()
5934 struct file *f = fp->fi_deleg_file->nf_file; in nfsd4_check_conflicting_opens()
5938 writes = atomic_read(&ino->i_writecount); in nfsd4_check_conflicting_opens()
5945 * trying to go look up all the clients using that other in nfsd4_check_conflicting_opens()
5948 if (fp->fi_aliased) in nfsd4_check_conflicting_opens()
5949 return -EAGAIN; in nfsd4_check_conflicting_opens()
5957 if (fp->fi_fds[O_WRONLY]) in nfsd4_check_conflicting_opens()
5958 writes--; in nfsd4_check_conflicting_opens()
5959 if (fp->fi_fds[O_RDWR]) in nfsd4_check_conflicting_opens()
5960 writes--; in nfsd4_check_conflicting_opens()
5962 return -EAGAIN; /* There may be non-NFSv4 writers */ in nfsd4_check_conflicting_opens()
5964 * It's possible there are non-NFSv4 write opens in progress, in nfsd4_check_conflicting_opens()
5967 * lease soon enough. So, all that's left to check for is NFSv4 in nfsd4_check_conflicting_opens()
5970 spin_lock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
5971 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { in nfsd4_check_conflicting_opens()
5972 if (st->st_openstp == NULL /* it's an open */ && in nfsd4_check_conflicting_opens()
5974 st->st_stid.sc_client != clp) { in nfsd4_check_conflicting_opens()
5975 spin_unlock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
5976 return -EAGAIN; in nfsd4_check_conflicting_opens()
5979 spin_unlock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
6002 err = nfsd_lookup_dentry(open->op_rqstp, parent, in nfsd4_verify_deleg_dentry()
6003 open->op_fname, open->op_fnamelen, in nfsd4_verify_deleg_dentry()
6007 return -EAGAIN; in nfsd4_verify_deleg_dentry()
6011 if (child != file_dentry(fp->fi_deleg_file->nf_file)) in nfsd4_verify_deleg_dentry()
6012 return -EAGAIN; in nfsd4_verify_deleg_dentry()
6026 struct inode *inode = file_inode(nf->nf_file); in nfsd4_verify_setuid_write()
6028 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) && in nfsd4_verify_setuid_write()
6029 (inode->i_mode & (S_ISUID|S_ISGID))) in nfsd4_verify_setuid_write()
6030 return -EAGAIN; in nfsd4_verify_setuid_write()
6037 return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; in nfsd4_want_deleg_timestamps()
6051 struct nfs4_client *clp = stp->st_stid.sc_client; in nfs4_set_delegation()
6052 struct nfs4_file *fp = stp->st_stid.sc_file; in nfs4_set_delegation()
6053 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; in nfs4_set_delegation()
6065 if (fp->fi_had_conflict) in nfs4_set_delegation()
6066 return ERR_PTR(-EAGAIN); in nfs4_set_delegation()
6072 * on its own, all opens." in nfs4_set_delegation()
6085 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { in nfs4_set_delegation()
6094 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) { in nfs4_set_delegation()
6100 return ERR_PTR(-EAGAIN); in nfs4_set_delegation()
6106 if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) { in nfs4_set_delegation()
6108 return ERR_PTR(-EOPNOTSUPP); in nfs4_set_delegation()
6112 spin_lock(&fp->fi_lock); in nfs4_set_delegation()
6114 status = -EAGAIN; in nfs4_set_delegation()
6116 status = -EAGAIN; in nfs4_set_delegation()
6117 else if (!fp->fi_deleg_file) { in nfs4_set_delegation()
6118 fp->fi_deleg_file = nf; in nfs4_set_delegation()
6121 fp->fi_delegees = 1; in nfs4_set_delegation()
6124 fp->fi_delegees++; in nfs4_set_delegation()
6125 spin_unlock(&fp->fi_lock); in nfs4_set_delegation()
6132 status = -ENOMEM; in nfs4_set_delegation()
6141 status = kernel_setlease(fp->fi_deleg_file->nf_file, in nfs4_set_delegation()
6142 fl->c.flc_type, &fl, NULL); in nfs4_set_delegation()
6162 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); in nfs4_set_delegation()
6166 status = -EAGAIN; in nfs4_set_delegation()
6167 if (fp->fi_had_conflict) in nfs4_set_delegation()
6171 spin_lock(&clp->cl_lock); in nfs4_set_delegation()
6172 spin_lock(&fp->fi_lock); in nfs4_set_delegation()
6174 spin_unlock(&fp->fi_lock); in nfs4_set_delegation()
6175 spin_unlock(&clp->cl_lock); in nfs4_set_delegation()
6183 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); in nfs4_set_delegation()
6185 put_clnt_odstate(dp->dl_clnt_odstate); in nfs4_set_delegation()
6186 nfs4_put_stid(&dp->dl_stid); in nfs4_set_delegation()
6194 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_open_deleg_none_ext()
6195 if (status == -EAGAIN) in nfsd4_open_deleg_none_ext()
6196 open->op_why_no_deleg = WND4_CONTENTION; in nfsd4_open_deleg_none_ext()
6198 open->op_why_no_deleg = WND4_RESOURCE; in nfsd4_open_deleg_none_ext()
6199 switch (open->op_deleg_want) { in nfsd4_open_deleg_none_ext()
6205 open->op_why_no_deleg = WND4_CANCELLED; in nfsd4_open_deleg_none_ext()
6217 struct nfsd_file *nf = find_writeable_file(dp->dl_stid.sc_file); in nfs4_delegation_stat()
6224 path.mnt = currentfh->fh_export->ex_path.mnt; in nfs4_delegation_stat()
6225 path.dentry = file_dentry(nf->nf_file); in nfs4_delegation_stat()
6249 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == in nfsd4_add_rdaccess_to_wrdeleg()
6253 fp = stp->st_stid.sc_file; in nfsd4_add_rdaccess_to_wrdeleg()
6254 spin_lock(&fp->fi_lock); in nfsd4_add_rdaccess_to_wrdeleg()
6256 fp = stp->st_stid.sc_file; in nfsd4_add_rdaccess_to_wrdeleg()
6257 fp->fi_fds[O_RDONLY] = nf; in nfsd4_add_rdaccess_to_wrdeleg()
6258 fp->fi_rdeleg_file = nf; in nfsd4_add_rdaccess_to_wrdeleg()
6259 spin_unlock(&fp->fi_lock); in nfsd4_add_rdaccess_to_wrdeleg()
6293 struct nfs4_openowner *oo = openowner(stp->st_stateowner); in nfs4_open_delegation()
6295 struct nfs4_client *clp = stp->st_stid.sc_client; in nfs4_open_delegation()
6302 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); in nfs4_open_delegation()
6303 open->op_recall = false; in nfs4_open_delegation()
6304 switch (open->op_claim_type) { in nfs4_open_delegation()
6307 open->op_recall = true; in nfs4_open_delegation()
6316 * NLM locks have all been reclaimed: in nfs4_open_delegation()
6318 if (locks_in_grace(clp->net)) in nfs4_open_delegation()
6320 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) in nfs4_open_delegation()
6322 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE && in nfs4_open_delegation()
6323 !clp->cl_minorversion) in nfs4_open_delegation()
6333 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); in nfs4_open_delegation()
6335 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { in nfs4_open_delegation()
6336 struct file *f = dp->dl_stid.sc_file->fi_deleg_file->nf_file; in nfs4_open_delegation()
6340 nfs4_put_stid(&dp->dl_stid); in nfs4_open_delegation()
6344 open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : in nfs4_open_delegation()
6346 dp->dl_cb_fattr.ncf_cur_fsize = stat.size; in nfs4_open_delegation()
6347 dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat); in nfs4_open_delegation()
6348 dp->dl_atime = stat.atime; in nfs4_open_delegation()
6349 dp->dl_ctime = stat.ctime; in nfs4_open_delegation()
6350 dp->dl_mtime = stat.mtime; in nfs4_open_delegation()
6351 spin_lock(&f->f_lock); in nfs4_open_delegation()
6352 f->f_mode |= FMODE_NOCMTIME; in nfs4_open_delegation()
6353 spin_unlock(&f->f_lock); in nfs4_open_delegation()
6354 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); in nfs4_open_delegation()
6356 open->op_delegate_type = deleg_ts && nfs4_delegation_stat(dp, currentfh, &stat) ? in nfs4_open_delegation()
6358 dp->dl_atime = stat.atime; in nfs4_open_delegation()
6359 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); in nfs4_open_delegation()
6361 nfs4_put_stid(&dp->dl_stid); in nfs4_open_delegation()
6364 open->op_delegate_type = OPEN_DELEGATE_NONE; in nfs4_open_delegation()
6365 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && in nfs4_open_delegation()
6366 open->op_delegate_type != OPEN_DELEGATE_NONE) { in nfs4_open_delegation()
6368 open->op_recall = true; in nfs4_open_delegation()
6372 if (open->op_deleg_want) in nfs4_open_delegation()
6380 if (deleg_is_write(dp->dl_type)) { in nfsd4_deleg_xgrade_none_ext()
6381 if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) { in nfsd4_deleg_xgrade_none_ext()
6382 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_deleg_xgrade_none_ext()
6383 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; in nfsd4_deleg_xgrade_none_ext()
6384 } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) { in nfsd4_deleg_xgrade_none_ext()
6385 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_deleg_xgrade_none_ext()
6386 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; in nfsd4_deleg_xgrade_none_ext()
6398 if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION)) in open_xor_delegation()
6401 if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type)) in open_xor_delegation()
6407 * nfsd4_process_open2 - finish open processing
6412 * If successful, (1) truncate the file if open->op_truncate was
6413 * set, (2) set open->op_stateid, (3) set open->op_delegation.
6421 struct nfsd4_compoundres *resp = rqstp->rq_resp; in nfsd4_process_open2()
6422 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; in nfsd4_process_open2()
6434 fp = nfsd4_file_hash_insert(open->op_file, current_fh); in nfsd4_process_open2()
6437 if (fp != open->op_file) { in nfsd4_process_open2()
6442 (dp->dl_stid.sc_file != fp)) { in nfsd4_process_open2()
6457 open->op_file = NULL; in nfsd4_process_open2()
6470 if (!open->op_stp) in nfsd4_process_open2()
6484 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
6491 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
6495 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, in nfsd4_process_open2()
6496 open->op_odstate); in nfsd4_process_open2()
6497 if (stp->st_clnt_odstate == open->op_odstate) in nfsd4_process_open2()
6498 open->op_odstate = NULL; in nfsd4_process_open2()
6501 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); in nfsd4_process_open2()
6502 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
6504 if (nfsd4_has_session(&resp->cstate)) { in nfsd4_process_open2()
6505 if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) { in nfsd4_process_open2()
6506 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_process_open2()
6507 open->op_why_no_deleg = WND4_NOT_WANTED; in nfsd4_process_open2()
6517 &resp->cstate.current_fh, current_fh); in nfsd4_process_open2()
6525 memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid)); in nfsd4_process_open2()
6526 open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID; in nfsd4_process_open2()
6531 trace_nfsd_open(&stp->st_stid.sc_stateid); in nfsd4_process_open2()
6534 if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp && in nfsd4_process_open2()
6535 open->op_deleg_want) in nfsd4_process_open2()
6540 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) in nfsd4_process_open2()
6541 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; in nfsd4_process_open2()
6545 open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX; in nfsd4_process_open2()
6546 if (nfsd4_has_session(&resp->cstate)) in nfsd4_process_open2()
6547 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; in nfsd4_process_open2()
6548 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) in nfsd4_process_open2()
6549 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; in nfsd4_process_open2()
6552 nfs4_put_stid(&dp->dl_stid); in nfsd4_process_open2()
6554 nfs4_put_stid(&stp->st_stid); in nfsd4_process_open2()
6562 if (open->op_openowner) in nfsd4_cleanup_open_state()
6563 nfs4_put_stateowner(&open->op_openowner->oo_owner); in nfsd4_cleanup_open_state()
6564 if (open->op_file) in nfsd4_cleanup_open_state()
6565 kmem_cache_free(file_slab, open->op_file); in nfsd4_cleanup_open_state()
6566 if (open->op_stp) in nfsd4_cleanup_open_state()
6567 nfs4_put_stid(&open->op_stp->st_stid); in nfsd4_cleanup_open_state()
6568 if (open->op_odstate) in nfsd4_cleanup_open_state()
6569 kmem_cache_free(odstate_slab, open->op_odstate); in nfsd4_cleanup_open_state()
6576 clientid_t *clid = &u->renew; in nfsd4_renew()
6585 clp = cstate->clp; in nfsd4_renew()
6586 if (!list_empty(&clp->cl_delegations) in nfsd4_renew()
6587 && clp->cl_cb_state != NFSD4_CB_UP) in nfsd4_renew()
6596 if (nn->grace_ended) in nfsd4_end_grace()
6600 nn->grace_ended = true; in nfsd4_end_grace()
6617 locks_end_grace(&nn->nfsd4_manager); in nfsd4_end_grace()
6631 time64_t double_grace_period_end = nn->boot_time + in clients_still_reclaiming()
6632 2 * nn->nfsd4_lease; in clients_still_reclaiming()
6634 if (nn->track_reclaim_completes && in clients_still_reclaiming()
6635 atomic_read(&nn->nr_reclaim_complete) == in clients_still_reclaiming()
6636 nn->reclaim_str_hashtbl_size) in clients_still_reclaiming()
6638 if (!nn->somebody_reclaimed) in clients_still_reclaiming()
6640 nn->somebody_reclaimed = false; in clients_still_reclaiming()
6659 if (last_refresh < lt->cutoff) in state_expired()
6661 time_remaining = last_refresh - lt->cutoff; in state_expired()
6662 lt->new_timeo = min(lt->new_timeo, time_remaining); in state_expired()
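The state_expired() fragments above carry the laundromat's cutoff arithmetic: anything last refreshed before (now - lease) is reaped, and anything newer shortens the wait until the next laundromat run. A minimal standalone sketch of that arithmetic (userspace C, invented names, not the kernel code):

#include <stdbool.h>
#include <time.h>

/* Hypothetical stand-in for the kernel's laundry_time bookkeeping. */
struct sketch_laundry_time {
	time_t cutoff;     /* now minus the lease period */
	time_t new_timeo;  /* seconds until the next laundromat run */
};

static bool sketch_state_expired(struct sketch_laundry_time *lt,
				 time_t last_refresh)
{
	if (last_refresh < lt->cutoff)
		return true;                            /* reap it now */
	if (last_refresh - lt->cutoff < lt->new_timeo)  /* expires soonest? */
		lt->new_timeo = last_refresh - lt->cutoff;
	return false;
}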
6669 spin_lock_init(&nn->nfsd_ssc_lock); in nfsd4_ssc_init_umount_work()
6670 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list); in nfsd4_ssc_init_umount_work()
6671 init_waitqueue_head(&nn->nfsd_ssc_waitq); in nfsd4_ssc_init_umount_work()
6675 * This is called when nfsd is being shutdown, after all inter_ssc
6683 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6684 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { in nfsd4_ssc_shutdown_umount()
6685 list_del(&ni->nsui_list); in nfsd4_ssc_shutdown_umount()
6686 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6687 mntput(ni->nsui_vfsmount); in nfsd4_ssc_shutdown_umount()
6689 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6691 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6700 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6701 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { in nfsd4_ssc_expire_umount()
6702 if (time_after(jiffies, ni->nsui_expire)) { in nfsd4_ssc_expire_umount()
6703 if (refcount_read(&ni->nsui_refcnt) > 1) in nfsd4_ssc_expire_umount()
6707 ni->nsui_busy = true; in nfsd4_ssc_expire_umount()
6708 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6709 mntput(ni->nsui_vfsmount); in nfsd4_ssc_expire_umount()
6710 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6713 list_del(&ni->nsui_list); in nfsd4_ssc_expire_umount()
6723 wake_up_all(&nn->nfsd_ssc_waitq); in nfsd4_ssc_expire_umount()
6724 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6736 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { in nfs4_lockowner_has_blockers()
6737 nf = stp->st_stid.sc_file; in nfs4_lockowner_has_blockers()
6738 ctx = locks_inode_context(nf->fi_inode); in nfs4_lockowner_has_blockers()
6754 if (atomic_read(&clp->cl_delegs_in_recall)) in nfs4_anylock_blockers()
6756 spin_lock(&clp->cl_lock); in nfs4_anylock_blockers()
6758 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i], in nfs4_anylock_blockers()
6760 if (so->so_is_open_owner) in nfs4_anylock_blockers()
6764 spin_unlock(&clp->cl_lock); in nfs4_anylock_blockers()
6769 spin_unlock(&clp->cl_lock); in nfs4_anylock_blockers()
6781 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ? in nfs4_get_client_reaplist()
6784 spin_lock(&nn->client_lock); in nfs4_get_client_reaplist()
6785 list_for_each_safe(pos, next, &nn->client_lru) { in nfs4_get_client_reaplist()
6787 if (clp->cl_state == NFSD4_EXPIRABLE) in nfs4_get_client_reaplist()
6789 if (!state_expired(lt, clp->cl_time)) in nfs4_get_client_reaplist()
6791 if (!atomic_read(&clp->cl_rpc_users)) { in nfs4_get_client_reaplist()
6792 if (clp->cl_state == NFSD4_ACTIVE) in nfs4_get_client_reaplist()
6793 atomic_inc(&nn->nfsd_courtesy_clients); in nfs4_get_client_reaplist()
6794 clp->cl_state = NFSD4_COURTESY; in nfs4_get_client_reaplist()
6803 list_add(&clp->cl_lru, reaplist); in nfs4_get_client_reaplist()
6807 spin_unlock(&nn->client_lock); in nfs4_get_client_reaplist()
6821 spin_lock(&nn->client_lock); in nfs4_get_courtesy_client_reaplist()
6822 list_for_each_safe(pos, next, &nn->client_lru) { in nfs4_get_courtesy_client_reaplist()
6824 if (clp->cl_state == NFSD4_ACTIVE) in nfs4_get_courtesy_client_reaplist()
6829 list_add(&clp->cl_lru, reaplist); in nfs4_get_courtesy_client_reaplist()
6833 spin_unlock(&nn->client_lock); in nfs4_get_courtesy_client_reaplist()
6844 trace_nfsd_clid_purged(&clp->cl_clientid); in nfs4_process_client_reaplist()
6845 list_del_init(&clp->cl_lru); in nfs4_process_client_reaplist()
6855 spin_lock(&nn->client_lock); in nfs40_clean_admin_revoked()
6856 if (nn->nfs40_last_revoke == 0 || in nfs40_clean_admin_revoked()
6857 nn->nfs40_last_revoke > lt->cutoff) { in nfs40_clean_admin_revoked()
6858 spin_unlock(&nn->client_lock); in nfs40_clean_admin_revoked()
6861 nn->nfs40_last_revoke = 0; in nfs40_clean_admin_revoked()
6864 list_for_each_entry(clp, &nn->client_lru, cl_lru) { in nfs40_clean_admin_revoked()
6868 if (atomic_read(&clp->cl_admin_revoked) == 0) in nfs40_clean_admin_revoked()
6871 spin_lock(&clp->cl_lock); in nfs40_clean_admin_revoked()
6872 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id) in nfs40_clean_admin_revoked()
6873 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { in nfs40_clean_admin_revoked()
6874 refcount_inc(&stid->sc_count); in nfs40_clean_admin_revoked()
6875 spin_unlock(&nn->client_lock); in nfs40_clean_admin_revoked()
6876 /* this function drops ->cl_lock */ in nfs40_clean_admin_revoked()
6879 spin_lock(&nn->client_lock); in nfs40_clean_admin_revoked()
6882 spin_unlock(&clp->cl_lock); in nfs40_clean_admin_revoked()
6884 spin_unlock(&nn->client_lock); in nfs40_clean_admin_revoked()
6896 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, in nfs4_laundromat()
6897 .new_timeo = nn->nfsd4_lease in nfs4_laundromat()
6909 spin_lock(&nn->s2s_cp_lock); in nfs4_laundromat()
6910 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { in nfs4_laundromat()
6912 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID && in nfs4_laundromat()
6913 state_expired(<, cps->cpntf_time)) in nfs4_laundromat()
6916 spin_unlock(&nn->s2s_cp_lock); in nfs4_laundromat()
6924 list_for_each_safe(pos, next, &nn->del_recall_lru) { in nfs4_laundromat()
6926 if (!state_expired(<, dp->dl_time)) in nfs4_laundromat()
6928 refcount_inc(&dp->dl_stid.sc_count); in nfs4_laundromat()
6930 list_add(&dp->dl_recall_lru, &reaplist); in nfs4_laundromat()
6936 list_del_init(&dp->dl_recall_lru); in nfs4_laundromat()
6940 spin_lock(&nn->client_lock); in nfs4_laundromat()
6941 while (!list_empty(&nn->close_lru)) { in nfs4_laundromat()
6942 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, in nfs4_laundromat()
6944 if (!state_expired(<, oo->oo_time)) in nfs4_laundromat()
6946 list_del_init(&oo->oo_close_lru); in nfs4_laundromat()
6947 stp = oo->oo_last_closed_stid; in nfs4_laundromat()
6948 oo->oo_last_closed_stid = NULL; in nfs4_laundromat()
6949 spin_unlock(&nn->client_lock); in nfs4_laundromat()
6950 nfs4_put_stid(&stp->st_stid); in nfs4_laundromat()
6951 spin_lock(&nn->client_lock); in nfs4_laundromat()
6953 spin_unlock(&nn->client_lock); in nfs4_laundromat()
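The close_lru loop above shows a pattern that recurs throughout this file: unlink the entry while holding the lock, then drop the lock around the blocking put and retake it before looking at the list again. A rough userspace model of that pattern, assuming a pthread mutex and an invented singly-linked list (a sketch, not the kernel code):

#include <pthread.h>
#include <stdlib.h>

struct sk_node {
	struct sk_node *next;
	void *payload;
};

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sk_node *sk_head;

/* Stands in for a potentially blocking release such as a final put. */
static void sk_release(void *payload)
{
	free(payload);
}

static void sk_reap_all(void)
{
	pthread_mutex_lock(&sk_lock);
	while (sk_head) {
		struct sk_node *n = sk_head;

		sk_head = n->next;              /* unlink while locked */
		pthread_mutex_unlock(&sk_lock); /* never block under the lock */
		sk_release(n->payload);
		free(n);
		pthread_mutex_lock(&sk_lock);
	}
	pthread_mutex_unlock(&sk_lock);
}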
6958 * So, we clean out any un-revisited request after a lease period in nfs4_laundromat()
6967 spin_lock(&nn->blocked_locks_lock); in nfs4_laundromat()
6968 while (!list_empty(&nn->blocked_locks_lru)) { in nfs4_laundromat()
6969 nbl = list_first_entry(&nn->blocked_locks_lru, in nfs4_laundromat()
6971 if (!state_expired(<, nbl->nbl_time)) in nfs4_laundromat()
6973 list_move(&nbl->nbl_lru, &reaplist); in nfs4_laundromat()
6974 list_del_init(&nbl->nbl_list); in nfs4_laundromat()
6976 spin_unlock(&nn->blocked_locks_lock); in nfs4_laundromat()
6981 list_del_init(&nbl->nbl_lru); in nfs4_laundromat()
6985 /* service the server-to-server copy delayed unmount list */ in nfs4_laundromat()
7005 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); in laundromat_main()
7023 spin_lock(&nn->client_lock); in deleg_reaper()
7024 list_for_each_safe(pos, next, &nn->client_lru) { in deleg_reaper()
7027 if (clp->cl_state != NFSD4_ACTIVE) in deleg_reaper()
7029 if (list_empty(&clp->cl_delegations)) in deleg_reaper()
7031 if (atomic_read(&clp->cl_delegs_in_recall)) in deleg_reaper()
7033 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags)) in deleg_reaper()
7035 if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) in deleg_reaper()
7037 if (clp->cl_cb_state != NFSD4_CB_UP) in deleg_reaper()
7041 kref_get(&clp->cl_nfsdfs.cl_ref); in deleg_reaper()
7042 clp->cl_ra_time = ktime_get_boottime_seconds(); in deleg_reaper()
7043 clp->cl_ra->ra_keep = 0; in deleg_reaper()
7044 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) | in deleg_reaper()
7046 trace_nfsd_cb_recall_any(clp->cl_ra); in deleg_reaper()
7047 nfsd4_run_cb(&clp->cl_ra->ra_cb); in deleg_reaper()
7049 spin_unlock(&nn->client_lock); in deleg_reaper()
7064 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) in nfs4_check_fh()
7075 if (stp->st_openstp) in nfs4_check_openmode()
7076 stp = stp->st_openstp; in nfs4_check_openmode()
7109 if (has_session && in->si_generation == 0) in check_stateid_generation()
7112 if (in->si_generation == ref->si_generation) in check_stateid_generation()
7120 * non-buggy client. For example, if the client sends a lock in check_stateid_generation()
7123 * situation by waiting for responses on all the IO requests, in check_stateid_generation()
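The check_stateid_generation() fragments above compare the generation carried in the client's stateid against the server's copy: with a session, generation 0 means "use the current one", and an exact match succeeds. The remaining branches are only hinted at by the surrounding comment, so the sketch below assumes the usual outcome (a generation from the future is a bad stateid; anything older is answered with old_stateid so the client can retry). Standalone, invented names:

#include <stdint.h>

enum sketch_status { SK_OK, SK_OLD_STATEID, SK_BAD_STATEID };

static enum sketch_status
sketch_check_generation(uint32_t in_gen, uint32_t ref_gen, int has_session)
{
	if (has_session && in_gen == 0)
		return SK_OK;                  /* "the current stateid" */
	if (in_gen == ref_gen)
		return SK_OK;
	if ((int32_t)(in_gen - ref_gen) > 0)   /* wraparound-safe compare */
		return SK_BAD_STATEID;         /* stateid from the future */
	return SK_OLD_STATEID;                 /* stale; client may retry */
}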
7135 spin_lock(&s->sc_lock); in nfsd4_stid_check_stateid_generation()
7138 ret = check_stateid_generation(in, &s->sc_stateid, has_session); in nfsd4_stid_check_stateid_generation()
7139 spin_unlock(&s->sc_lock); in nfsd4_stid_check_stateid_generation()
7141 nfsd40_drop_revoked_stid(s->sc_client, in nfsd4_stid_check_stateid_generation()
7142 &s->sc_stateid); in nfsd4_stid_check_stateid_generation()
7148 if (ols->st_stateowner->so_is_open_owner && in nfsd4_check_openowner_confirmed()
7149 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) in nfsd4_check_openowner_confirmed()
7162 spin_lock(&cl->cl_lock); in nfsd4_validate_stateid()
7173 switch (s->sc_type) { in nfsd4_validate_stateid()
7182 printk("unknown stateid type %x\n", s->sc_type); in nfsd4_validate_stateid()
7186 spin_unlock(&cl->cl_lock); in nfsd4_validate_stateid()
7219 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); in nfsd4_lookup_stateid()
7221 if (cstate->session) in nfsd4_lookup_stateid()
7227 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask); in nfsd4_lookup_stateid()
7230 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) { in nfsd4_lookup_stateid()
7234 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { in nfsd4_lookup_stateid()
7235 nfsd40_drop_revoked_stid(cstate->clp, stateid); in nfsd4_lookup_stateid()
7248 if (!s || s->sc_status) in nfs4_find_file()
7251 switch (s->sc_type) { in nfs4_find_file()
7256 ret = find_readable_file(s->sc_file); in nfs4_find_file()
7258 ret = find_writeable_file(s->sc_file); in nfs4_find_file()
7285 status = nfsd_permission(&rqstp->rq_cred, in nfs4_check_file()
7286 fhp->fh_export, fhp->fh_dentry, in nfs4_check_file()
7304 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); in _free_cpntf_state_locked()
7305 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) in _free_cpntf_state_locked()
7307 list_del(&cps->cp_list); in _free_cpntf_state_locked()
7308 idr_remove(&nn->s2s_cp_stateids, in _free_cpntf_state_locked()
7309 cps->cp_stateid.cs_stid.si_opaque.so_id); in _free_cpntf_state_locked()
7324 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) in manage_cpntf_state()
7326 spin_lock(&nn->s2s_cp_lock); in manage_cpntf_state()
7327 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); in manage_cpntf_state()
7331 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { in manage_cpntf_state()
7336 refcount_inc(&state->cp_stateid.cs_count); in manage_cpntf_state()
7341 spin_unlock(&nn->s2s_cp_lock); in manage_cpntf_state()
7360 cps->cpntf_time = ktime_get_boottime_seconds(); in find_cpntf_state()
7363 found = lookup_clientid(&cps->cp_p_clid, true, nn); in find_cpntf_state()
7367 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, in find_cpntf_state()
7383 spin_lock(&nn->s2s_cp_lock); in nfs4_put_cpntf_state()
7385 spin_unlock(&nn->s2s_cp_lock); in nfs4_put_cpntf_state()
7389 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
7433 switch (s->sc_type) { in nfs4_preprocess_stateid_op()
7466 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; in nfsd4_test_stateid()
7468 struct nfs4_client *cl = cstate->clp; in nfsd4_test_stateid()
7470 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) in nfsd4_test_stateid()
7471 stateid->ts_id_status = in nfsd4_test_stateid()
7472 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); in nfsd4_test_stateid()
7487 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); in nfsd4_free_lock_stateid()
7492 if (check_for_locks(stp->st_stid.sc_file, in nfsd4_free_lock_stateid()
7493 lockowner(stp->st_stateowner))) in nfsd4_free_lock_stateid()
7500 mutex_unlock(&stp->st_mutex); in nfsd4_free_lock_stateid()
7510 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; in nfsd4_free_stateid()
7511 stateid_t *stateid = &free_stateid->fr_stateid; in nfsd4_free_stateid()
7514 struct nfs4_client *cl = cstate->clp; in nfsd4_free_stateid()
7517 spin_lock(&cl->cl_lock); in nfsd4_free_stateid()
7519 if (!s || s->sc_status & SC_STATUS_CLOSED) in nfsd4_free_stateid()
7521 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) { in nfsd4_free_stateid()
7526 spin_lock(&s->sc_lock); in nfsd4_free_stateid()
7527 switch (s->sc_type) { in nfsd4_free_stateid()
7529 if (s->sc_status & SC_STATUS_REVOKED) { in nfsd4_free_stateid()
7530 s->sc_status |= SC_STATUS_CLOSED; in nfsd4_free_stateid()
7531 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
7533 if (s->sc_status & SC_STATUS_FREEABLE) in nfsd4_free_stateid()
7534 list_del_init(&dp->dl_recall_lru); in nfsd4_free_stateid()
7535 s->sc_status |= SC_STATUS_FREED; in nfsd4_free_stateid()
7536 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
7544 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); in nfsd4_free_stateid()
7550 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
7551 refcount_inc(&s->sc_count); in nfsd4_free_stateid()
7552 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
7556 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
7558 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
7572 struct svc_fh *current_fh = &cstate->current_fh; in nfs4_seqid_op_checks()
7573 struct nfs4_stateowner *sop = stp->st_stateowner; in nfs4_seqid_op_checks()
7582 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); in nfs4_seqid_op_checks()
7584 status = nfs4_check_fh(current_fh, &stp->st_stid); in nfs4_seqid_op_checks()
7586 mutex_unlock(&stp->st_mutex); in nfs4_seqid_op_checks()
7591 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
7624 if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) { in nfs4_preprocess_seqid_op()
7625 nfs4_put_stateowner(stp->st_stateowner); in nfs4_preprocess_seqid_op()
7633 nfs4_put_stid(&stp->st_stid); in nfs4_preprocess_seqid_op()
7648 oo = openowner(stp->st_stateowner); in nfs4_preprocess_confirmed_seqid_op()
7649 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { in nfs4_preprocess_confirmed_seqid_op()
7650 mutex_unlock(&stp->st_mutex); in nfs4_preprocess_confirmed_seqid_op()
7651 nfs4_put_stid(&stp->st_stid); in nfs4_preprocess_confirmed_seqid_op()
7662 struct nfsd4_open_confirm *oc = &u->open_confirm; in nfsd4_open_confirm() local
7669 cstate->current_fh.fh_dentry); in nfsd4_open_confirm()
7671 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); in nfsd4_open_confirm()
7676 oc->oc_seqid, &oc->oc_req_stateid, in nfsd4_open_confirm()
7680 oo = openowner(stp->st_stateowner); in nfsd4_open_confirm()
7682 if (oo->oo_flags & NFS4_OO_CONFIRMED) { in nfsd4_open_confirm()
7683 mutex_unlock(&stp->st_mutex); in nfsd4_open_confirm()
7686 oo->oo_flags |= NFS4_OO_CONFIRMED; in nfsd4_open_confirm()
7687 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); in nfsd4_open_confirm()
7688 mutex_unlock(&stp->st_mutex); in nfsd4_open_confirm()
7689 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); in nfsd4_open_confirm()
7690 nfsd4_client_record_create(oo->oo_owner.so_client); in nfsd4_open_confirm()
7693 nfs4_put_stid(&stp->st_stid); in nfsd4_open_confirm()
7703 nfs4_file_put_access(stp->st_stid.sc_file, access); in nfs4_stateid_downgrade_bit()
7729 struct nfsd4_open_downgrade *od = &u->open_downgrade; in nfsd4_open_downgrade()
7735 cstate->current_fh.fh_dentry); in nfsd4_open_downgrade()
7738 if (od->od_deleg_want) in nfsd4_open_downgrade()
7740 od->od_deleg_want); in nfsd4_open_downgrade()
7742 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, in nfsd4_open_downgrade()
7743 &od->od_stateid, &stp, nn); in nfsd4_open_downgrade()
7747 if (!test_access(od->od_share_access, stp)) { in nfsd4_open_downgrade()
7749 stp->st_access_bmap, od->od_share_access); in nfsd4_open_downgrade()
7752 if (!test_deny(od->od_share_deny, stp)) { in nfsd4_open_downgrade()
7754 stp->st_deny_bmap, od->od_share_deny); in nfsd4_open_downgrade()
7757 nfs4_stateid_downgrade(stp, od->od_share_access); in nfsd4_open_downgrade()
7758 reset_union_bmap_deny(od->od_share_deny, stp); in nfsd4_open_downgrade()
7759 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); in nfsd4_open_downgrade()
7762 mutex_unlock(&stp->st_mutex); in nfsd4_open_downgrade()
7763 nfs4_put_stid(&stp->st_stid); in nfsd4_open_downgrade()
7771 struct nfs4_client *clp = s->st_stid.sc_client; in nfsd4_close_open_stateid()
7776 spin_lock(&clp->cl_lock); in nfsd4_close_open_stateid()
7779 if (clp->cl_minorversion) { in nfsd4_close_open_stateid()
7782 spin_unlock(&clp->cl_lock); in nfsd4_close_open_stateid()
7784 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); in nfsd4_close_open_stateid()
7788 spin_unlock(&clp->cl_lock); in nfsd4_close_open_stateid()
7801 struct nfsd4_close *close = &u->close; in nfsd4_close()
7809 cstate->current_fh.fh_dentry); in nfsd4_close()
7811 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, in nfsd4_close()
7812 &close->cl_stateid, in nfsd4_close()
7819 spin_lock(&stp->st_stid.sc_client->cl_lock); in nfsd4_close()
7820 stp->st_stid.sc_status |= SC_STATUS_CLOSED; in nfsd4_close()
7821 spin_unlock(&stp->st_stid.sc_client->cl_lock); in nfsd4_close()
7829 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); in nfsd4_close()
7832 mutex_unlock(&stp->st_mutex); in nfsd4_close()
7843 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); in nfsd4_close()
7846 nfs4_put_stid(&stp->st_stid); in nfsd4_close()
7855 struct nfsd4_delegreturn *dr = &u->delegreturn; in nfsd4_delegreturn()
7857 stateid_t *stateid = &dr->dr_stateid; in nfsd4_delegreturn()
7862 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) in nfsd4_delegreturn()
7869 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); in nfsd4_delegreturn()
7876 wake_up_var(d_inode(cstate->current_fh.fh_dentry)); in nfsd4_delegreturn()
7878 nfs4_put_stid(&dp->dl_stid); in nfsd4_delegreturn()
7891 return end > start ? end - 1: NFS4_MAX_UINT64; in last_byte_offset()
7895 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7896 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7897 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7898 * locking, this prevents us from being completely protocol-compliant. The
7905 if (lock->fl_start < 0) in nfs4_transform_lock_offset()
7906 lock->fl_start = OFFSET_MAX; in nfs4_transform_lock_offset()
7907 if (lock->fl_end < 0) in nfs4_transform_lock_offset()
7908 lock->fl_end = OFFSET_MAX; in nfs4_transform_lock_offset()
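The TODO above notes that NFSv4 byte ranges are unsigned 64-bit while Linux file offsets are signed, so last_byte_offset() and nfs4_transform_lock_offset() together map an (offset, length) pair onto an inclusive [start, end] range and clamp anything that cannot be represented. A standalone sketch of the net effect (userspace C, invented names, not the kernel code):

#include <stdint.h>

#define SKETCH_NFS4_MAX_UINT64 (~(uint64_t)0)
#define SKETCH_OFFSET_MAX      INT64_MAX

/* Last byte covered by a lock of 'len' bytes starting at 'start';
 * if the sum wraps past the end of the offset space, the lock is
 * treated as extending to the maximum offset. */
static uint64_t sketch_last_byte_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;

	return end > start ? end - 1 : SKETCH_NFS4_MAX_UINT64;
}

/* Clamp a 64-bit unsigned offset into the signed range Linux can store. */
static int64_t sketch_clamp_offset(uint64_t off)
{
	return off > (uint64_t)INT64_MAX ? SKETCH_OFFSET_MAX : (int64_t)off;
}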
7916 nfs4_get_stateowner(&lo->lo_owner); in nfsd4_lm_get_owner()
7926 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_lm_put_owner()
7933 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner; in nfsd4_lm_lock_expirable()
7934 struct nfs4_client *clp = lo->lo_owner.so_client; in nfsd4_lm_lock_expirable()
7938 nn = net_generic(clp->net, nfsd_net_id); in nfsd4_lm_lock_expirable()
7939 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfsd4_lm_lock_expirable()
7955 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner; in nfsd4_lm_notify()
7956 struct net *net = lo->lo_owner.so_client->net; in nfsd4_lm_notify()
7963 spin_lock(&nn->blocked_locks_lock); in nfsd4_lm_notify()
7964 if (!list_empty(&nbl->nbl_list)) { in nfsd4_lm_notify()
7965 list_del_init(&nbl->nbl_list); in nfsd4_lm_notify()
7966 list_del_init(&nbl->nbl_lru); in nfsd4_lm_notify()
7969 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lm_notify()
7973 nfsd4_try_run_cb(&nbl->nbl_cb); in nfsd4_lm_notify()
7991 if (fl->fl_lmops == &nfsd_posix_mng_ops) { in nfs4_set_lock_denied()
7992 lo = (struct nfs4_lockowner *) fl->c.flc_owner; in nfs4_set_lock_denied()
7993 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, in nfs4_set_lock_denied()
7995 if (!deny->ld_owner.data) in nfs4_set_lock_denied()
7998 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; in nfs4_set_lock_denied()
8001 deny->ld_owner.len = 0; in nfs4_set_lock_denied()
8002 deny->ld_owner.data = NULL; in nfs4_set_lock_denied()
8003 deny->ld_clientid.cl_boot = 0; in nfs4_set_lock_denied()
8004 deny->ld_clientid.cl_id = 0; in nfs4_set_lock_denied()
8006 deny->ld_start = fl->fl_start; in nfs4_set_lock_denied()
8007 deny->ld_length = NFS4_MAX_UINT64; in nfs4_set_lock_denied()
8008 if (fl->fl_end != NFS4_MAX_UINT64) in nfs4_set_lock_denied()
8009 deny->ld_length = fl->fl_end - fl->fl_start + 1; in nfs4_set_lock_denied()
8010 deny->ld_type = NFS4_READ_LT; in nfs4_set_lock_denied()
8011 if (fl->c.flc_type != F_RDLCK) in nfs4_set_lock_denied()
8012 deny->ld_type = NFS4_WRITE_LT; in nfs4_set_lock_denied()
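nfs4_set_lock_denied() above performs the reverse mapping when reporting a conflict: the conflicting lock's inclusive [start, end] range becomes an NFSv4 (offset, length) pair, with end == NFS4_MAX_UINT64 reported as a whole-file length, and the VFS lock type folded down to READ or WRITE. A minimal sketch of that conversion (invented names, not the kernel code):

#include <stdint.h>

#define SK_NFS4_MAX_UINT64 (~(uint64_t)0)

enum sk_locktype { SK_READ_LT = 1, SK_WRITE_LT = 2 };

struct sk_denied {
	uint64_t offset;
	uint64_t length;
	enum sk_locktype type;
};

static struct sk_denied
sk_make_denied(uint64_t start, uint64_t end, int is_read)
{
	struct sk_denied d = {
		.offset = start,
		.length = SK_NFS4_MAX_UINT64,   /* "to end of file" */
		.type   = is_read ? SK_READ_LT : SK_WRITE_LT,
	};

	if (end != SK_NFS4_MAX_UINT64)
		d.length = end - start + 1;     /* inclusive range -> length */
	return d;
}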
8021 lockdep_assert_held(&clp->cl_lock); in find_lockowner_str_locked()
8023 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], in find_lockowner_str_locked()
8025 if (so->so_is_open_owner) in find_lockowner_str_locked()
8038 spin_lock(&clp->cl_lock); in find_lockowner_str()
8040 spin_unlock(&clp->cl_lock); in find_lockowner_str()
8063 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
8075 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); in alloc_init_lock_stateowner()
8078 INIT_LIST_HEAD(&lo->lo_blocked); in alloc_init_lock_stateowner()
8079 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); in alloc_init_lock_stateowner()
8080 lo->lo_owner.so_is_open_owner = 0; in alloc_init_lock_stateowner()
8081 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; in alloc_init_lock_stateowner()
8082 lo->lo_owner.so_ops = &lockowner_ops; in alloc_init_lock_stateowner()
8083 spin_lock(&clp->cl_lock); in alloc_init_lock_stateowner()
8084 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); in alloc_init_lock_stateowner()
8086 list_add(&lo->lo_owner.so_strhash, in alloc_init_lock_stateowner()
8087 &clp->cl_ownerstr_hashtbl[strhashval]); in alloc_init_lock_stateowner()
8090 nfs4_free_stateowner(&lo->lo_owner); in alloc_init_lock_stateowner()
8092 spin_unlock(&clp->cl_lock); in alloc_init_lock_stateowner()
8102 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); in find_lock_stateid()
8104 /* If ost is not hashed, ost->st_locks will not be valid */ in find_lock_stateid()
8106 list_for_each_entry(lst, &ost->st_locks, st_locks) { in find_lock_stateid()
8107 if (lst->st_stateowner == &lo->lo_owner) { in find_lock_stateid()
8108 refcount_inc(&lst->st_stid.sc_count); in find_lock_stateid()
8120 struct nfs4_client *clp = lo->lo_owner.so_client; in init_lock_stateid()
8123 mutex_init(&stp->st_mutex); in init_lock_stateid()
8124 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); in init_lock_stateid()
8126 spin_lock(&clp->cl_lock); in init_lock_stateid()
8132 refcount_inc(&stp->st_stid.sc_count); in init_lock_stateid()
8133 stp->st_stid.sc_type = SC_TYPE_LOCK; in init_lock_stateid()
8134 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); in init_lock_stateid()
8136 stp->st_stid.sc_file = fp; in init_lock_stateid()
8137 stp->st_access_bmap = 0; in init_lock_stateid()
8138 stp->st_deny_bmap = open_stp->st_deny_bmap; in init_lock_stateid()
8139 stp->st_openstp = open_stp; in init_lock_stateid()
8140 spin_lock(&fp->fi_lock); in init_lock_stateid()
8141 list_add(&stp->st_locks, &open_stp->st_locks); in init_lock_stateid()
8142 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); in init_lock_stateid()
8143 list_add(&stp->st_perfile, &fp->fi_stateids); in init_lock_stateid()
8144 spin_unlock(&fp->fi_lock); in init_lock_stateid()
8145 spin_unlock(&clp->cl_lock); in init_lock_stateid()
8148 spin_unlock(&clp->cl_lock); in init_lock_stateid()
8150 nfs4_put_stid(&retstp->st_stid); in init_lock_stateid()
8154 mutex_unlock(&stp->st_mutex); in init_lock_stateid()
8157 spin_unlock(&clp->cl_lock); in init_lock_stateid()
8158 mutex_unlock(&stp->st_mutex); in init_lock_stateid()
8169 struct nfs4_openowner *oo = openowner(ost->st_stateowner); in find_or_create_lock_stateid()
8170 struct nfs4_client *clp = oo->oo_owner.so_client; in find_or_create_lock_stateid()
8173 spin_lock(&clp->cl_lock); in find_or_create_lock_stateid()
8175 spin_unlock(&clp->cl_lock); in find_or_create_lock_stateid()
8179 nfs4_put_stid(&lst->st_stid); in find_or_create_lock_stateid()
8203 struct nfs4_file *fp = lock_stp->st_stid.sc_file; in get_lock_access()
8205 lockdep_assert_held(&fp->fi_lock); in get_lock_access()
8220 struct nfs4_file *fi = ost->st_stid.sc_file; in lookup_or_create_lock_state()
8221 struct nfs4_openowner *oo = openowner(ost->st_stateowner); in lookup_or_create_lock_state()
8222 struct nfs4_client *cl = oo->oo_owner.so_client; in lookup_or_create_lock_state()
8223 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); in lookup_or_create_lock_state()
8228 lo = find_lockowner_str(cl, &lock->lk_new_owner); in lookup_or_create_lock_state()
8230 strhashval = ownerstr_hashval(&lock->lk_new_owner); in lookup_or_create_lock_state()
8237 if (!cstate->minorversion && in lookup_or_create_lock_state()
8238 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) in lookup_or_create_lock_state()
8251 nfs4_put_stateowner(&lo->lo_owner); in lookup_or_create_lock_state()
8262 struct nfsd4_lock *lock = &u->lock; in nfsd4_lock()
8282 (long long) lock->lk_offset, in nfsd4_lock()
8283 (long long) lock->lk_length); in nfsd4_lock()
8285 if (check_lock_length(lock->lk_offset, lock->lk_length)) in nfsd4_lock()
8288 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); in nfsd4_lock()
8291 if (exportfs_cannot_lock(cstate->current_fh.fh_dentry->d_sb->s_export_op)) { in nfsd4_lock()
8296 if (lock->lk_is_new) { in nfsd4_lock()
8299 memcpy(&lock->lk_new_clientid, in nfsd4_lock()
8300 &cstate->clp->cl_clientid, in nfsd4_lock()
8305 lock->lk_new_open_seqid, in nfsd4_lock()
8306 &lock->lk_new_open_stateid, in nfsd4_lock()
8310 mutex_unlock(&open_stp->st_mutex); in nfsd4_lock()
8311 open_sop = openowner(open_stp->st_stateowner); in nfsd4_lock()
8313 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, in nfsd4_lock()
8314 &lock->lk_new_clientid)) in nfsd4_lock()
8320 lock->lk_old_lock_seqid, in nfsd4_lock()
8321 &lock->lk_old_lock_stateid, in nfsd4_lock()
8327 lock_sop = lockowner(lock_stp->st_stateowner); in nfsd4_lock()
8329 lkflg = setlkflg(lock->lk_type); in nfsd4_lock()
8335 if (locks_in_grace(net) && !lock->lk_reclaim) in nfsd4_lock()
8338 if (!locks_in_grace(net) && lock->lk_reclaim) in nfsd4_lock()
8341 if (lock->lk_reclaim) in nfsd4_lock()
8344 fp = lock_stp->st_stid.sc_file; in nfsd4_lock()
8345 switch (lock->lk_type) { in nfsd4_lock()
8349 spin_lock(&fp->fi_lock); in nfsd4_lock()
8353 spin_unlock(&fp->fi_lock); in nfsd4_lock()
8359 spin_lock(&fp->fi_lock); in nfsd4_lock()
8363 spin_unlock(&fp->fi_lock); in nfsd4_lock()
8376 if (lock->lk_type & (NFS4_READW_LT | NFS4_WRITEW_LT) && in nfsd4_lock()
8378 locks_can_async_lock(nf->nf_file->f_op)) in nfsd4_lock()
8381 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); in nfsd4_lock()
8388 file_lock = &nbl->nbl_lock; in nfsd4_lock()
8389 file_lock->c.flc_type = type; in nfsd4_lock()
8390 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); in nfsd4_lock()
8391 file_lock->c.flc_pid = current->tgid; in nfsd4_lock()
8392 file_lock->c.flc_file = nf->nf_file; in nfsd4_lock()
8393 file_lock->c.flc_flags = flags; in nfsd4_lock()
8394 file_lock->fl_lmops = &nfsd_posix_mng_ops; in nfsd4_lock()
8395 file_lock->fl_start = lock->lk_offset; in nfsd4_lock()
8396 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); in nfsd4_lock()
8407 nbl->nbl_time = ktime_get_boottime_seconds(); in nfsd4_lock()
8408 spin_lock(&nn->blocked_locks_lock); in nfsd4_lock()
8409 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); in nfsd4_lock()
8410 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); in nfsd4_lock()
8411 kref_get(&nbl->nbl_kref); in nfsd4_lock()
8412 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lock()
8415 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); in nfsd4_lock()
8418 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); in nfsd4_lock()
8420 if (lock->lk_reclaim) in nfsd4_lock()
8421 nn->somebody_reclaimed = true; in nfsd4_lock()
8424 kref_put(&nbl->nbl_kref, free_nbl); in nfsd4_lock()
8427 case -EAGAIN: /* conflock holds conflicting lock */ in nfsd4_lock()
8430 nfs4_set_lock_denied(conflock, &lock->lk_denied); in nfsd4_lock()
8432 case -EDEADLK: in nfsd4_lock()
8444 spin_lock(&nn->blocked_locks_lock); in nfsd4_lock()
8445 if (!list_empty(&nbl->nbl_list) && in nfsd4_lock()
8446 !list_empty(&nbl->nbl_lru)) { in nfsd4_lock()
8447 list_del_init(&nbl->nbl_list); in nfsd4_lock()
8448 list_del_init(&nbl->nbl_lru); in nfsd4_lock()
8449 kref_put(&nbl->nbl_kref, free_nbl); in nfsd4_lock()
8452 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lock()
8460 if (cstate->replay_owner && in nfsd4_lock()
8461 cstate->replay_owner != &lock_sop->lo_owner && in nfsd4_lock()
8463 lock_sop->lo_owner.so_seqid++; in nfsd4_lock()
8466 * If this is a new, never-before-used stateid, and we are in nfsd4_lock()
8472 mutex_unlock(&lock_stp->st_mutex); in nfsd4_lock()
8474 nfs4_put_stid(&lock_stp->st_stid); in nfsd4_lock()
8477 nfs4_put_stid(&open_stp->st_stid); in nfsd4_lock()
8486 struct nfsd4_lock *lock = &u->lock; in nfsd4_lock_release()
8487 struct nfsd4_lock_denied *deny = &lock->lk_denied; in nfsd4_lock_release()
8489 kfree(deny->ld_owner.data); in nfsd4_lock_release()
8506 inode = fhp->fh_dentry->d_inode; in nfsd_test_lock()
8511 lock->c.flc_file = nf->nf_file; in nfsd_test_lock()
8512 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); in nfsd_test_lock()
8513 lock->c.flc_file = NULL; in nfsd_test_lock()
8527 struct nfsd4_lockt *lockt = &u->lockt; in nfsd4_lockt()
8536 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) in nfsd4_lockt()
8540 status = set_client(&lockt->lt_clientid, cstate, nn); in nfsd4_lockt()
8545 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) in nfsd4_lockt()
8555 switch (lockt->lt_type) { in nfsd4_lockt()
8558 file_lock->c.flc_type = F_RDLCK; in nfsd4_lockt()
8562 file_lock->c.flc_type = F_WRLCK; in nfsd4_lockt()
8570 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); in nfsd4_lockt()
8572 file_lock->c.flc_owner = (fl_owner_t)lo; in nfsd4_lockt()
8573 file_lock->c.flc_pid = current->tgid; in nfsd4_lockt()
8574 file_lock->c.flc_flags = FL_POSIX; in nfsd4_lockt()
8576 file_lock->fl_start = lockt->lt_offset; in nfsd4_lockt()
8577 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); in nfsd4_lockt()
8581 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); in nfsd4_lockt()
8585 if (file_lock->c.flc_type != F_UNLCK) { in nfsd4_lockt()
8587 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); in nfsd4_lockt()
8591 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_lockt()
8599 struct nfsd4_lockt *lockt = &u->lockt; in nfsd4_lockt_release()
8600 struct nfsd4_lock_denied *deny = &lockt->lt_denied; in nfsd4_lockt_release()
8602 kfree(deny->ld_owner.data); in nfsd4_lockt_release()
8609 struct nfsd4_locku *locku = &u->locku; in nfsd4_locku()
8618 (long long) locku->lu_offset, in nfsd4_locku()
8619 (long long) locku->lu_length); in nfsd4_locku()
8621 if (check_lock_length(locku->lu_offset, locku->lu_length)) in nfsd4_locku()
8624 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, in nfsd4_locku()
8625 &locku->lu_stateid, SC_TYPE_LOCK, 0, in nfsd4_locku()
8629 nf = find_any_file(stp->st_stid.sc_file); in nfsd4_locku()
8634 if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) { in nfsd4_locku()
8646 file_lock->c.flc_type = F_UNLCK; in nfsd4_locku()
8647 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); in nfsd4_locku()
8648 file_lock->c.flc_pid = current->tgid; in nfsd4_locku()
8649 file_lock->c.flc_file = nf->nf_file; in nfsd4_locku()
8650 file_lock->c.flc_flags = FL_POSIX; in nfsd4_locku()
8651 file_lock->fl_lmops = &nfsd_posix_mng_ops; in nfsd4_locku()
8652 file_lock->fl_start = locku->lu_offset; in nfsd4_locku()
8654 file_lock->fl_end = last_byte_offset(locku->lu_offset, in nfsd4_locku()
8655 locku->lu_length); in nfsd4_locku()
8658 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); in nfsd4_locku()
8663 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); in nfsd4_locku()
8667 mutex_unlock(&stp->st_mutex); in nfsd4_locku()
8668 nfs4_put_stid(&stp->st_stid); in nfsd4_locku()
8694 spin_lock(&fp->fi_lock); in check_for_locks()
8702 inode = file_inode(nf->nf_file); in check_for_locks()
8705 if (flctx && !list_empty_careful(&flctx->flc_posix)) { in check_for_locks()
8706 spin_lock(&flctx->flc_lock); in check_for_locks()
8707 for_each_file_lock(fl, &flctx->flc_posix) { in check_for_locks()
8708 if (fl->c.flc_owner == (fl_owner_t)lowner) { in check_for_locks()
8713 spin_unlock(&flctx->flc_lock); in check_for_locks()
8716 spin_unlock(&fp->fi_lock); in check_for_locks()
8721 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
8740 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; in nfsd4_release_lockowner()
8742 clientid_t *clid = &rlockowner->rl_clientid; in nfsd4_release_lockowner()
8750 clid->cl_boot, clid->cl_id); in nfsd4_release_lockowner()
8755 clp = cstate->clp; in nfsd4_release_lockowner()
8757 spin_lock(&clp->cl_lock); in nfsd4_release_lockowner()
8758 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner); in nfsd4_release_lockowner()
8760 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8764 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { in nfsd4_release_lockowner()
8765 if (check_for_locks(stp->st_stid.sc_file, lo)) { in nfsd4_release_lockowner()
8766 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8767 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_release_lockowner()
8772 while (!list_empty(&lo->lo_owner.so_stateids)) { in nfsd4_release_lockowner()
8773 stp = list_first_entry(&lo->lo_owner.so_stateids, in nfsd4_release_lockowner()
8779 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8783 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_release_lockowner()
8799 return (crp && crp->cr_clp); in nfs4_has_reclaimed_state()
8803 * failure => all reset bets are off, nfserr_no_grace...
8818 INIT_LIST_HEAD(&crp->cr_strhash); in nfs4_client_to_reclaim()
8819 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); in nfs4_client_to_reclaim()
8820 crp->cr_name.data = name.data; in nfs4_client_to_reclaim()
8821 crp->cr_name.len = name.len; in nfs4_client_to_reclaim()
8822 crp->cr_princhash.data = princhash.data; in nfs4_client_to_reclaim()
8823 crp->cr_princhash.len = princhash.len; in nfs4_client_to_reclaim()
8824 crp->cr_clp = NULL; in nfs4_client_to_reclaim()
8825 nn->reclaim_str_hashtbl_size++; in nfs4_client_to_reclaim()
8833 list_del(&crp->cr_strhash); in nfs4_remove_reclaim_record()
8834 kfree(crp->cr_name.data); in nfs4_remove_reclaim_record()
8835 kfree(crp->cr_princhash.data); in nfs4_remove_reclaim_record()
8837 nn->reclaim_str_hashtbl_size--; in nfs4_remove_reclaim_record()
8847 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { in nfs4_release_reclaim()
8848 crp = list_entry(nn->reclaim_str_hashtbl[i].next, in nfs4_release_reclaim()
8853 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); in nfs4_release_reclaim()
8865 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { in nfsd4_find_reclaim_client()
8866 if (compare_blob(&crp->cr_name, &name) == 0) { in nfsd4_find_reclaim_client()
8876 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) in nfs4_check_open_reclaim()
8903 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); in set_max_delegations()
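Worked out for the common case, the shift in set_max_delegations() is 20 - 2 - 12 = 6 on a 4 KiB-page system (PAGE_SHIFT == 12), i.e. one delegation per 64 free pages, or roughly four delegations per megabyte of free buffer memory; with larger pages the shift shrinks so the per-megabyte allowance stays about the same.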
8911 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, in nfs4_state_create_net()
8914 if (!nn->conf_id_hashtbl) in nfs4_state_create_net()
8916 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, in nfs4_state_create_net()
8919 if (!nn->unconf_id_hashtbl) in nfs4_state_create_net()
8921 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, in nfs4_state_create_net()
8924 if (!nn->sessionid_hashtbl) in nfs4_state_create_net()
8928 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); in nfs4_state_create_net()
8929 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); in nfs4_state_create_net()
8932 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); in nfs4_state_create_net()
8933 nn->conf_name_tree = RB_ROOT; in nfs4_state_create_net()
8934 nn->unconf_name_tree = RB_ROOT; in nfs4_state_create_net()
8935 nn->boot_time = ktime_get_real_seconds(); in nfs4_state_create_net()
8936 nn->grace_ended = false; in nfs4_state_create_net()
8937 nn->nfsd4_manager.block_opens = true; in nfs4_state_create_net()
8938 INIT_LIST_HEAD(&nn->nfsd4_manager.list); in nfs4_state_create_net()
8939 INIT_LIST_HEAD(&nn->client_lru); in nfs4_state_create_net()
8940 INIT_LIST_HEAD(&nn->close_lru); in nfs4_state_create_net()
8941 INIT_LIST_HEAD(&nn->del_recall_lru); in nfs4_state_create_net()
8942 spin_lock_init(&nn->client_lock); in nfs4_state_create_net()
8943 spin_lock_init(&nn->s2s_cp_lock); in nfs4_state_create_net()
8944 idr_init(&nn->s2s_cp_stateids); in nfs4_state_create_net()
8945 atomic_set(&nn->pending_async_copies, 0); in nfs4_state_create_net()
8947 spin_lock_init(&nn->blocked_locks_lock); in nfs4_state_create_net()
8948 INIT_LIST_HEAD(&nn->blocked_locks_lru); in nfs4_state_create_net()
8950 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); in nfs4_state_create_net()
8951 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); in nfs4_state_create_net()
8954 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client"); in nfs4_state_create_net()
8955 if (!nn->nfsd_client_shrinker) in nfs4_state_create_net()
8958 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan; in nfs4_state_create_net()
8959 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count; in nfs4_state_create_net()
8960 nn->nfsd_client_shrinker->private_data = nn; in nfs4_state_create_net()
8962 shrinker_register(nn->nfsd_client_shrinker); in nfs4_state_create_net()
8968 kfree(nn->sessionid_hashtbl); in nfs4_state_create_net()
8970 kfree(nn->unconf_id_hashtbl); in nfs4_state_create_net()
8972 kfree(nn->conf_id_hashtbl); in nfs4_state_create_net()
8974 return -ENOMEM; in nfs4_state_create_net()
8985 while (!list_empty(&nn->conf_id_hashtbl[i])) { in nfs4_state_destroy_net()
8986 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); in nfs4_state_destroy_net()
8991 WARN_ON(!list_empty(&nn->blocked_locks_lru)); in nfs4_state_destroy_net()
8994 while (!list_empty(&nn->unconf_id_hashtbl[i])) { in nfs4_state_destroy_net()
8995 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); in nfs4_state_destroy_net()
9000 kfree(nn->sessionid_hashtbl); in nfs4_state_destroy_net()
9001 kfree(nn->unconf_id_hashtbl); in nfs4_state_destroy_net()
9002 kfree(nn->conf_id_hashtbl); in nfs4_state_destroy_net()
9015 locks_start_grace(net, &nn->nfsd4_manager); in nfs4_state_start_net()
9017 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) in nfs4_state_start_net()
9019 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", in nfs4_state_start_net()
9020 nn->nfsd4_grace, net->ns.inum); in nfs4_state_start_net()
9022 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); in nfs4_state_start_net()
9027 net->ns.inum); in nfs4_state_start_net()
9028 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); in nfs4_state_start_net()
9043 nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot"); in nfs4_state_start()
9046 return -ENOMEM; in nfs4_state_start()
9048 nfsd_slot_shrinker->count_objects = nfsd_slot_count; in nfs4_state_start()
9049 nfsd_slot_shrinker->scan_objects = nfsd_slot_scan; in nfs4_state_start()
9063 shrinker_free(nn->nfsd_client_shrinker); in nfs4_state_shutdown_net()
9064 cancel_work_sync(&nn->nfsd_shrinker_work); in nfs4_state_shutdown_net()
9065 cancel_delayed_work_sync(&nn->laundromat_work); in nfs4_state_shutdown_net()
9066 locks_end_grace(&nn->nfsd4_manager); in nfs4_state_shutdown_net()
9070 list_for_each_safe(pos, next, &nn->del_recall_lru) { in nfs4_state_shutdown_net()
9073 list_add(&dp->dl_recall_lru, &reaplist); in nfs4_state_shutdown_net()
9078 list_del_init(&dp->dl_recall_lru); in nfs4_state_shutdown_net()
9101 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); in get_stateid()
9107 if (cstate->minorversion) { in put_stateid()
9108 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); in put_stateid()
9126 put_stateid(cstate, &u->open_downgrade.od_stateid); in nfsd4_set_opendowngradestateid()
9133 put_stateid(cstate, &u->open.op_stateid); in nfsd4_set_openstateid()
9140 put_stateid(cstate, &u->close.cl_stateid); in nfsd4_set_closestateid()
9147 put_stateid(cstate, &u->lock.lk_resp_stateid); in nfsd4_set_lockstateid()
9158 get_stateid(cstate, &u->open_downgrade.od_stateid); in nfsd4_get_opendowngradestateid()
9165 get_stateid(cstate, &u->delegreturn.dr_stateid); in nfsd4_get_delegreturnstateid()
9172 get_stateid(cstate, &u->free_stateid.fr_stateid); in nfsd4_get_freestateid()
9179 get_stateid(cstate, &u->setattr.sa_stateid); in nfsd4_get_setattrstateid()
9186 get_stateid(cstate, &u->close.cl_stateid); in nfsd4_get_closestateid()
9193 get_stateid(cstate, &u->locku.lu_stateid); in nfsd4_get_lockustateid()
9200 get_stateid(cstate, &u->read.rd_stateid); in nfsd4_get_readstateid()
9207 get_stateid(cstate, &u->write.wr_stateid); in nfsd4_get_writestateid()
9211 * nfsd4_vet_deleg_time - vet and set the timespec for a delegated timestamp update
9246 struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr; in cb_getattr_update_times()
9250 if (deleg_attrs_deleg(dp->dl_type)) { in cb_getattr_update_times()
9253 attrs.ia_atime = ncf->ncf_cb_atime; in cb_getattr_update_times()
9254 attrs.ia_mtime = ncf->ncf_cb_mtime; in cb_getattr_update_times()
9256 if (nfsd4_vet_deleg_time(&attrs.ia_atime, &dp->dl_atime, &now)) in cb_getattr_update_times()
9259 if (nfsd4_vet_deleg_time(&attrs.ia_mtime, &dp->dl_mtime, &now)) { in cb_getattr_update_times()
9262 if (nfsd4_vet_deleg_time(&attrs.ia_ctime, &dp->dl_ctime, &now)) in cb_getattr_update_times()
9280 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
9293 * code is returned. If @pdp is set to a non-NULL value, then the
9314 spin_lock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
9315 for_each_file_lock(fl, &ctx->flc_lease) { in nfsd4_deleg_getattr_conflict()
9316 if (fl->c.flc_flags == FL_LAYOUT) in nfsd4_deleg_getattr_conflict()
9318 if (fl->c.flc_type == F_WRLCK) { in nfsd4_deleg_getattr_conflict()
9319 if (fl->fl_lmops == &nfsd_lease_mng_ops) in nfsd4_deleg_getattr_conflict()
9320 dp = fl->c.flc_owner; in nfsd4_deleg_getattr_conflict()
9327 dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { in nfsd4_deleg_getattr_conflict()
9328 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
9340 refcount_inc(&dp->dl_stid.sc_count); in nfsd4_deleg_getattr_conflict()
9341 ncf = &dp->dl_cb_fattr; in nfsd4_deleg_getattr_conflict()
9342 nfs4_cb_getattr(&dp->dl_cb_fattr); in nfsd4_deleg_getattr_conflict()
9343 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
9345 wait_on_bit_timeout(&ncf->ncf_getattr.cb_flags, NFSD4_CALLBACK_RUNNING, in nfsd4_deleg_getattr_conflict()
9347 if (ncf->ncf_cb_status) { in nfsd4_deleg_getattr_conflict()
9354 if (!ncf->ncf_file_modified && in nfsd4_deleg_getattr_conflict()
9355 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || in nfsd4_deleg_getattr_conflict()
9356 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) in nfsd4_deleg_getattr_conflict()
9357 ncf->ncf_file_modified = true; in nfsd4_deleg_getattr_conflict()
9358 if (ncf->ncf_file_modified) { in nfsd4_deleg_getattr_conflict()
9371 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; in nfsd4_deleg_getattr_conflict()
9377 nfs4_put_stid(&dp->dl_stid); in nfsd4_deleg_getattr_conflict()