107 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
141 rc = -ENOMEM; in nfsd4_create_laundry_wq()
152 return ses->se_dead; in is_session_dead()
157 if (atomic_read(&ses->se_ref) > ref_held_by_me) in mark_session_dead_locked()
159 ses->se_dead = true; in mark_session_dead_locked()
165 return clp->cl_time == 0; in is_client_expired()
171 if (clp->cl_state != NFSD4_ACTIVE) in nfsd4_dec_courtesy_client_count()
172 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); in nfsd4_dec_courtesy_client_count()
177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in get_client_locked()
179 lockdep_assert_held(&nn->client_lock); in get_client_locked()
183 atomic_inc(&clp->cl_rpc_users); in get_client_locked()
185 clp->cl_state = NFSD4_ACTIVE; in get_client_locked()
193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in renew_client_locked()
199 clp->cl_clientid.cl_boot, in renew_client_locked()
200 clp->cl_clientid.cl_id); in renew_client_locked()
204 list_move_tail(&clp->cl_lru, &nn->client_lru); in renew_client_locked()
205 clp->cl_time = ktime_get_boottime_seconds(); in renew_client_locked()
207 clp->cl_state = NFSD4_ACTIVE; in renew_client_locked()
212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in put_client_renew_locked()
214 lockdep_assert_held(&nn->client_lock); in put_client_renew_locked()
216 if (!atomic_dec_and_test(&clp->cl_rpc_users)) in put_client_renew_locked()
226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in put_client_renew()
228 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock)) in put_client_renew()
234 spin_unlock(&nn->client_lock); in put_client_renew()
243 status = get_client_locked(ses->se_client); in nfsd4_get_session_locked()
246 atomic_inc(&ses->se_ref); in nfsd4_get_session_locked()
252 struct nfs4_client *clp = ses->se_client; in nfsd4_put_session_locked()
253 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_put_session_locked()
255 lockdep_assert_held(&nn->client_lock); in nfsd4_put_session_locked()
257 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) in nfsd4_put_session_locked()
264 struct nfs4_client *clp = ses->se_client; in nfsd4_put_session()
265 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in nfsd4_put_session()
267 spin_lock(&nn->client_lock); in nfsd4_put_session()
269 spin_unlock(&nn->client_lock); in nfsd4_put_session()
278 spin_lock(&nn->blocked_locks_lock); in find_blocked_lock()
279 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { in find_blocked_lock()
280 if (fh_match(fh, &cur->nbl_fh)) { in find_blocked_lock()
281 list_del_init(&cur->nbl_list); in find_blocked_lock()
282 WARN_ON(list_empty(&cur->nbl_lru)); in find_blocked_lock()
283 list_del_init(&cur->nbl_lru); in find_blocked_lock()
288 spin_unlock(&nn->blocked_locks_lock); in find_blocked_lock()
290 locks_delete_block(&found->nbl_lock); in find_blocked_lock()
304 INIT_LIST_HEAD(&nbl->nbl_list); in find_or_allocate_block()
305 INIT_LIST_HEAD(&nbl->nbl_lru); in find_or_allocate_block()
306 fh_copy_shallow(&nbl->nbl_fh, fh); in find_or_allocate_block()
307 locks_init_lock(&nbl->nbl_lock); in find_or_allocate_block()
308 kref_init(&nbl->nbl_kref); in find_or_allocate_block()
309 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, in find_or_allocate_block()
323 locks_release_private(&nbl->nbl_lock); in free_nbl()
330 locks_delete_block(&nbl->nbl_lock); in free_blocked_lock()
331 kref_put(&nbl->nbl_kref, free_nbl); in free_blocked_lock()
337 struct nfs4_client *clp = lo->lo_owner.so_client; in remove_blocked_locks()
338 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in remove_blocked_locks()
343 spin_lock(&nn->blocked_locks_lock); in remove_blocked_locks()
344 while (!list_empty(&lo->lo_blocked)) { in remove_blocked_locks()
345 nbl = list_first_entry(&lo->lo_blocked, in remove_blocked_locks()
348 list_del_init(&nbl->nbl_list); in remove_blocked_locks()
349 WARN_ON(list_empty(&nbl->nbl_lru)); in remove_blocked_locks()
350 list_move(&nbl->nbl_lru, &reaplist); in remove_blocked_locks()
352 spin_unlock(&nn->blocked_locks_lock); in remove_blocked_locks()
358 list_del_init(&nbl->nbl_lru); in remove_blocked_locks()
368 locks_delete_block(&nbl->nbl_lock); in nfsd4_cb_notify_lock_prepare()
381 switch (task->tk_status) { in nfsd4_cb_notify_lock_done()
382 case -NFS4ERR_DELAY: in nfsd4_cb_notify_lock_done()
412 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
450 stp->st_access_bmap |= mask; in set_access()
460 stp->st_access_bmap &= ~mask; in clear_access()
469 return (bool)(stp->st_access_bmap & mask); in test_access()
479 stp->st_deny_bmap |= mask; in set_deny()
489 stp->st_deny_bmap &= ~mask; in clear_deny()
498 return (bool)(stp->st_deny_bmap & mask); in test_deny()
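The set_access/clear_access/test_access and set_deny/clear_deny/test_deny helpers above record a stateid's share reservation as a small bitmap indexed by the NFSv4 share value. A minimal standalone sketch of that bookkeeping, assuming the usual share-access encoding (READ = 1, WRITE = 2, BOTH = 3); it is an illustration, not the nfs4state.c code:

/*
 * Standalone model of st_access_bmap / st_deny_bmap: each NFSv4 share value
 * selects one bit, so a stateid's access and deny modes are tiny bitmaps.
 */
#include <assert.h>
#include <stdbool.h>

#define SHARE_ACCESS_READ  1u
#define SHARE_ACCESS_WRITE 2u
#define SHARE_ACCESS_BOTH  3u

struct ol_stateid_model {
    unsigned char access_bmap;
    unsigned char deny_bmap;
};

static void set_access_bit(struct ol_stateid_model *stp, unsigned int access)
{
    stp->access_bmap |= 1u << access;
}

static bool test_access_bit(const struct ol_stateid_model *stp, unsigned int access)
{
    return stp->access_bmap & (1u << access);
}

static void clear_access_bit(struct ol_stateid_model *stp, unsigned int access)
{
    stp->access_bmap &= ~(1u << access);
}

int main(void)
{
    struct ol_stateid_model stp = { 0, 0 };

    set_access_bit(&stp, SHARE_ACCESS_READ);
    assert(test_access_bit(&stp, SHARE_ACCESS_READ));
    assert(!test_access_bit(&stp, SHARE_ACCESS_WRITE));
    clear_access_bit(&stp, SHARE_ACCESS_READ);
    assert(stp.access_bmap == 0);
    return 0;
}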
533 atomic_inc(&sop->so_count); in nfs4_get_stateowner()
540 return (sop->so_owner.len == owner->len) && in same_owner_str()
541 0 == memcmp(sop->so_owner.data, owner->data, owner->len); in same_owner_str()
550 lockdep_assert_held(&clp->cl_lock); in find_openstateowner_str()
552 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], in find_openstateowner_str()
554 if (!so->so_is_open_owner) in find_openstateowner_str()
556 if (same_owner_str(so, &open->op_owner)) in find_openstateowner_str()
568 while (nbytes--) { in opaque_hashval()
578 if (refcount_dec_and_test(&fi->fi_ref)) { in put_nfs4_file()
580 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); in put_nfs4_file()
581 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); in put_nfs4_file()
591 lockdep_assert_held(&f->fi_lock); in find_writeable_file_locked()
593 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); in find_writeable_file_locked()
595 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_writeable_file_locked()
604 spin_lock(&f->fi_lock); in find_writeable_file()
606 spin_unlock(&f->fi_lock); in find_writeable_file()
616 lockdep_assert_held(&f->fi_lock); in find_readable_file_locked()
618 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); in find_readable_file_locked()
620 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_readable_file_locked()
629 spin_lock(&f->fi_lock); in find_readable_file()
631 spin_unlock(&f->fi_lock); in find_readable_file()
643 spin_lock(&f->fi_lock); in find_any_file()
644 ret = nfsd_file_get(f->fi_fds[O_RDWR]); in find_any_file()
646 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); in find_any_file()
648 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); in find_any_file()
650 spin_unlock(&f->fi_lock); in find_any_file()
656 lockdep_assert_held(&f->fi_lock); in find_any_file_locked()
658 if (f->fi_fds[O_RDWR]) in find_any_file_locked()
659 return f->fi_fds[O_RDWR]; in find_any_file_locked()
660 if (f->fi_fds[O_WRONLY]) in find_any_file_locked()
661 return f->fi_fds[O_WRONLY]; in find_any_file_locked()
662 if (f->fi_fds[O_RDONLY]) in find_any_file_locked()
663 return f->fi_fds[O_RDONLY]; in find_any_file_locked()
677 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
683 ret = opaque_hashval(ownername->data, ownername->len); in ownerstr_hashval()
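ownerstr_hashval() folds the opaque owner string into an integer and masks it with OWNER_HASH_MASK to pick a bucket in cl_ownerstr_hashtbl. A standalone sketch of that pattern; the multiply-by-37 fold and the 8-bit table size are assumptions for illustration:

/* Model of hashing an opaque owner string into a power-of-two bucket table,
 * mirroring the opaque_hashval()/OWNER_HASH_MASK pattern above. */
#include <stdio.h>
#include <string.h>

#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)

static unsigned int opaque_hash(const void *ptr, int nbytes)
{
    const unsigned char *cptr = ptr;
    unsigned int x = 0;

    while (nbytes--) {
        x *= 37;        /* simple multiplicative fold (illustrative) */
        x += *cptr++;
    }
    return x;
}

int main(void)
{
    const char owner[] = "open id: client-1234";
    unsigned int bucket = opaque_hash(owner, (int)strlen(owner)) & OWNER_HASH_MASK;

    printf("owner string hashes to bucket %u of %d\n", bucket, OWNER_HASH_SIZE);
    return 0;
}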
715 * false - access/deny mode conflict with normal client.
716 * true - no conflict or conflict with courtesy client(s) is resolved.
728 lockdep_assert_held(&fp->fi_lock); in nfs4_resolve_deny_conflicts_locked()
729 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { in nfs4_resolve_deny_conflicts_locked()
731 if (st->st_openstp) in nfs4_resolve_deny_conflicts_locked()
736 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap; in nfs4_resolve_deny_conflicts_locked()
739 clp = st->st_stid.sc_client; in nfs4_resolve_deny_conflicts_locked()
746 clp = stp->st_stid.sc_client; in nfs4_resolve_deny_conflicts_locked()
747 nn = net_generic(clp->net, nfsd_net_id); in nfs4_resolve_deny_conflicts_locked()
748 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfs4_resolve_deny_conflicts_locked()
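Per the 715/716 comment, nfs4_resolve_deny_conflicts_locked() distinguishes two kinds of share conflict: a conflict with an active client fails the open (false), while conflicts only with courtesy clients are resolved by scheduling the laundromat to expire them (true). A simplified standalone decision model with invented types:

/* Simplified "conflict with normal client vs. courtesy client" decision.
 * Structures and states are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

enum client_state { ACTIVE, COURTESY, EXPIRABLE };

struct open_state {
    unsigned int access;              /* share access bits held */
    unsigned int deny;                /* share deny bits held */
    enum client_state owner_state;
};

/* false: conflict with an active client; true: no conflict, or only
 * courtesy-client conflicts (which would be handed to the laundromat). */
static bool resolve_deny_conflicts(const struct open_state *opens, int n,
                                   unsigned int new_access, unsigned int new_deny,
                                   bool *kick_laundromat)
{
    for (int i = 0; i < n; i++) {
        bool conflict = (opens[i].deny & new_access) ||
                        (opens[i].access & new_deny);
        if (!conflict)
            continue;
        if (opens[i].owner_state == ACTIVE)
            return false;
        *kick_laundromat = true;      /* expire the courtesy client */
    }
    return true;
}

int main(void)
{
    struct open_state opens[] = {
        { .access = 1, .deny = 2, .owner_state = COURTESY },
    };
    bool kick = false;
    bool ok = resolve_deny_conflicts(opens, 1, /*access*/ 2, /*deny*/ 0, &kick);

    printf("open %s, laundromat %s\n", ok ? "allowed" : "denied",
           kick ? "scheduled" : "idle");
    return 0;
}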
756 lockdep_assert_held(&fp->fi_lock); in __nfs4_file_get_access()
759 atomic_inc(&fp->fi_access[O_WRONLY]); in __nfs4_file_get_access()
761 atomic_inc(&fp->fi_access[O_RDONLY]); in __nfs4_file_get_access()
767 lockdep_assert_held(&fp->fi_lock); in nfs4_file_get_access()
774 if ((access & fp->fi_share_deny) != 0) in nfs4_file_get_access()
790 atomic_read(&fp->fi_access[O_RDONLY])) in nfs4_file_check_deny()
794 atomic_read(&fp->fi_access[O_WRONLY])) in nfs4_file_check_deny()
802 might_lock(&fp->fi_lock); in __nfs4_file_put_access()
804 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { in __nfs4_file_put_access()
808 swap(f1, fp->fi_fds[oflag]); in __nfs4_file_put_access()
809 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) in __nfs4_file_put_access()
810 swap(f2, fp->fi_fds[O_RDWR]); in __nfs4_file_put_access()
811 spin_unlock(&fp->fi_lock); in __nfs4_file_put_access()
833 * Note that we only allocate it for pNFS-enabled exports, otherwise
843 co->co_client = clp; in alloc_clnt_odstate()
844 refcount_set(&co->co_odcount, 1); in alloc_clnt_odstate()
852 struct nfs4_file *fp = co->co_file; in hash_clnt_odstate_locked()
854 lockdep_assert_held(&fp->fi_lock); in hash_clnt_odstate_locked()
855 list_add(&co->co_perfile, &fp->fi_clnt_odstate); in hash_clnt_odstate_locked()
862 refcount_inc(&co->co_odcount); in get_clnt_odstate()
873 fp = co->co_file; in put_clnt_odstate()
874 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { in put_clnt_odstate()
875 list_del(&co->co_perfile); in put_clnt_odstate()
876 spin_unlock(&fp->fi_lock); in put_clnt_odstate()
878 nfsd4_return_all_file_layouts(co->co_client, fp); in put_clnt_odstate()
892 cl = new->co_client; in find_or_hash_clnt_odstate()
894 spin_lock(&fp->fi_lock); in find_or_hash_clnt_odstate()
895 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { in find_or_hash_clnt_odstate()
896 if (co->co_client == cl) { in find_or_hash_clnt_odstate()
902 co->co_file = fp; in find_or_hash_clnt_odstate()
905 spin_unlock(&fp->fi_lock); in find_or_hash_clnt_odstate()
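find_or_hash_clnt_odstate() follows a common pattern: allocate a candidate without the lock held, then under fp->fi_lock either reuse an existing entry for the same client or publish the candidate. A generic standalone sketch of that find-or-insert, with a pthread mutex standing in for the spinlock and illustrative types:

/* Generic find-or-insert under a lock, modelled on find_or_hash_clnt_odstate(). */
#include <pthread.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    int client_id;
    int refcount;
};

struct file_state {
    pthread_mutex_t lock;
    struct entry *head;
};

/* 'new' was allocated by the caller without the lock held. */
static struct entry *find_or_insert(struct file_state *fp, struct entry *new)
{
    struct entry *e;

    pthread_mutex_lock(&fp->lock);
    for (e = fp->head; e; e = e->next) {
        if (e->client_id == new->client_id) {
            e->refcount++;                 /* reuse the existing entry */
            pthread_mutex_unlock(&fp->lock);
            free(new);                     /* discard the unused candidate */
            return e;
        }
    }
    new->next = fp->head;                  /* no match: publish the candidate */
    fp->head = new;
    pthread_mutex_unlock(&fp->lock);
    return new;
}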
920 spin_lock(&cl->cl_lock); in nfs4_alloc_stid()
922 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); in nfs4_alloc_stid()
923 spin_unlock(&cl->cl_lock); in nfs4_alloc_stid()
928 stid->sc_free = sc_free; in nfs4_alloc_stid()
929 stid->sc_client = cl; in nfs4_alloc_stid()
930 stid->sc_stateid.si_opaque.so_id = new_id; in nfs4_alloc_stid()
931 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; in nfs4_alloc_stid()
933 refcount_set(&stid->sc_count, 1); in nfs4_alloc_stid()
934 spin_lock_init(&stid->sc_lock); in nfs4_alloc_stid()
935 INIT_LIST_HEAD(&stid->sc_cp_list); in nfs4_alloc_stid()
951 stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time; in nfs4_init_cp_state()
952 stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; in nfs4_init_cp_state()
955 spin_lock(&nn->s2s_cp_lock); in nfs4_init_cp_state()
956 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); in nfs4_init_cp_state()
957 stid->cs_stid.si_opaque.so_id = new_id; in nfs4_init_cp_state()
958 stid->cs_stid.si_generation = 1; in nfs4_init_cp_state()
959 spin_unlock(&nn->s2s_cp_lock); in nfs4_init_cp_state()
963 stid->cs_type = cs_type; in nfs4_init_cp_state()
 969		return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID); in nfs4_init_copy_state()
980 cps->cpntf_time = ktime_get_boottime_seconds(); in nfs4_alloc_init_cpntf_state()
981 refcount_set(&cps->cp_stateid.cs_count, 1); in nfs4_alloc_init_cpntf_state()
982 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID)) in nfs4_alloc_init_cpntf_state()
984 spin_lock(&nn->s2s_cp_lock); in nfs4_alloc_init_cpntf_state()
985 list_add(&cps->cp_list, &p_stid->sc_cp_list); in nfs4_alloc_init_cpntf_state()
986 spin_unlock(&nn->s2s_cp_lock); in nfs4_alloc_init_cpntf_state()
997 if (copy->cp_stateid.cs_type != NFS4_COPY_STID) in nfs4_free_copy_state()
999 nn = net_generic(copy->cp_clp->net, nfsd_net_id); in nfs4_free_copy_state()
1000 spin_lock(&nn->s2s_cp_lock); in nfs4_free_copy_state()
1001 idr_remove(&nn->s2s_cp_stateids, in nfs4_free_copy_state()
1002 copy->cp_stateid.cs_stid.si_opaque.so_id); in nfs4_free_copy_state()
1003 spin_unlock(&nn->s2s_cp_lock); in nfs4_free_copy_state()
1012 spin_lock(&nn->s2s_cp_lock); in nfs4_free_cpntf_statelist()
1013 while (!list_empty(&stid->sc_cp_list)) { in nfs4_free_cpntf_statelist()
1014 cps = list_first_entry(&stid->sc_cp_list, in nfs4_free_cpntf_statelist()
1018 spin_unlock(&nn->s2s_cp_lock); in nfs4_free_cpntf_statelist()
1035 * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
1042 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list)); in nfs4_free_deleg()
1043 WARN_ON_ONCE(!list_empty(&dp->dl_perfile)); in nfs4_free_deleg()
1044 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt)); in nfs4_free_deleg()
1045 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru)); in nfs4_free_deleg()
1063 * low 3 bytes as hash-table indices.
1082 if (bd->entries == 0) in delegation_blocked()
1084 if (ktime_get_seconds() - bd->swap_time > 30) { in delegation_blocked()
1086 if (ktime_get_seconds() - bd->swap_time > 30) { in delegation_blocked()
1087 bd->entries -= bd->old_entries; in delegation_blocked()
1088 bd->old_entries = bd->entries; in delegation_blocked()
1089 bd->new = 1-bd->new; in delegation_blocked()
1090 memset(bd->set[bd->new], 0, in delegation_blocked()
1091 sizeof(bd->set[0])); in delegation_blocked()
1092 bd->swap_time = ktime_get_seconds(); in delegation_blocked()
1096 hash = jhash(&fh->fh_raw, fh->fh_size, 0); in delegation_blocked()
1097 if (test_bit(hash&255, bd->set[0]) && in delegation_blocked()
1098 test_bit((hash>>8)&255, bd->set[0]) && in delegation_blocked()
1099 test_bit((hash>>16)&255, bd->set[0])) in delegation_blocked()
1102 if (test_bit(hash&255, bd->set[1]) && in delegation_blocked()
1103 test_bit((hash>>8)&255, bd->set[1]) && in delegation_blocked()
1104 test_bit((hash>>16)&255, bd->set[1])) in delegation_blocked()
1115 hash = jhash(&fh->fh_raw, fh->fh_size, 0); in block_delegations()
1118 __set_bit(hash&255, bd->set[bd->new]); in block_delegations()
1119 __set_bit((hash>>8)&255, bd->set[bd->new]); in block_delegations()
1120 __set_bit((hash>>16)&255, bd->set[bd->new]); in block_delegations()
1121 if (bd->entries == 0) in block_delegations()
1122 bd->swap_time = ktime_get_seconds(); in block_delegations()
1123 bd->entries += 1; in block_delegations()
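delegation_blocked() and block_delegations() above keep an approximate set of filehandles whose delegations were recently broken: three bytes of a hash of the filehandle select three bits in a 256-bit set, and two such sets are aged by swapping them roughly every 30 seconds. A standalone model of the test and insert, with time() and a caller-supplied hash standing in for ktime_get_seconds() and jhash():

/* Two-generation, three-index-bit filter for "recently recalled" files.
 * Locking and the exact aging policy are simplified for illustration. */
#include <stdbool.h>
#include <string.h>
#include <time.h>

struct bloom_pair {
    unsigned char set[2][32];   /* two 256-bit sets */
    int new;                    /* which set receives inserts */
    time_t swap_time;
    int entries, old_entries;
};

static bool test3(const unsigned char *set, unsigned int hash)
{
    unsigned int a = hash & 255, b = (hash >> 8) & 255, c = (hash >> 16) & 255;

    return (set[a / 8] & (1 << (a % 8))) &&
           (set[b / 8] & (1 << (b % 8))) &&
           (set[c / 8] & (1 << (c % 8)));
}

static void set3(unsigned char *set, unsigned int hash)
{
    unsigned int idx[3] = { hash & 255, (hash >> 8) & 255, (hash >> 16) & 255 };

    for (int i = 0; i < 3; i++)
        set[idx[i] / 8] |= 1 << (idx[i] % 8);
}

static bool blocked(struct bloom_pair *bd, unsigned int hash)
{
    if (bd->entries == 0)
        return false;
    if (time(NULL) - bd->swap_time > 30) {      /* age out the old set */
        bd->entries -= bd->old_entries;
        bd->old_entries = bd->entries;
        bd->new = 1 - bd->new;
        memset(bd->set[bd->new], 0, sizeof(bd->set[0]));
        bd->swap_time = time(NULL);
    }
    return test3(bd->set[0], hash) || test3(bd->set[1], hash);
}

static void block(struct bloom_pair *bd, unsigned int hash)
{
    set3(bd->set[bd->new], hash);
    if (bd->entries == 0)
        bd->swap_time = time(NULL);
    bd->entries += 1;
}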
1139 if (delegation_blocked(&fp->fi_fhandle)) in alloc_init_deleg()
1151 dp->dl_stid.sc_stateid.si_generation = 1; in alloc_init_deleg()
1152 INIT_LIST_HEAD(&dp->dl_perfile); in alloc_init_deleg()
1153 INIT_LIST_HEAD(&dp->dl_perclnt); in alloc_init_deleg()
1154 INIT_LIST_HEAD(&dp->dl_recall_lru); in alloc_init_deleg()
1155 dp->dl_clnt_odstate = odstate; in alloc_init_deleg()
1157 dp->dl_type = dl_type; in alloc_init_deleg()
1158 dp->dl_retries = 1; in alloc_init_deleg()
1159 dp->dl_recalled = false; in alloc_init_deleg()
1160 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, in alloc_init_deleg()
1162 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client, in alloc_init_deleg()
1164 dp->dl_cb_fattr.ncf_file_modified = false; in alloc_init_deleg()
1166 dp->dl_stid.sc_file = fp; in alloc_init_deleg()
1176 struct nfs4_file *fp = s->sc_file; in nfs4_put_stid()
1177 struct nfs4_client *clp = s->sc_client; in nfs4_put_stid()
1179 might_lock(&clp->cl_lock); in nfs4_put_stid()
1181 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { in nfs4_put_stid()
1185 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); in nfs4_put_stid()
1186 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_put_stid()
1187 atomic_dec(&s->sc_client->cl_admin_revoked); in nfs4_put_stid()
1188 nfs4_free_cpntf_statelist(clp->net, s); in nfs4_put_stid()
1189 spin_unlock(&clp->cl_lock); in nfs4_put_stid()
1190 s->sc_free(s); in nfs4_put_stid()
1198 stateid_t *src = &stid->sc_stateid; in nfs4_inc_and_copy_stateid()
1200 spin_lock(&stid->sc_lock); in nfs4_inc_and_copy_stateid()
1201 if (unlikely(++src->si_generation == 0)) in nfs4_inc_and_copy_stateid()
1202 src->si_generation = 1; in nfs4_inc_and_copy_stateid()
1204 spin_unlock(&stid->sc_lock); in nfs4_inc_and_copy_stateid()
1212 spin_lock(&fp->fi_lock); in put_deleg_file()
1213 if (--fp->fi_delegees == 0) { in put_deleg_file()
1214 swap(nf, fp->fi_deleg_file); in put_deleg_file()
1215 swap(rnf, fp->fi_rdeleg_file); in put_deleg_file()
1217 spin_unlock(&fp->fi_lock); in put_deleg_file()
1232 if ((READ_ONCE(f->f_mode) & FMODE_NOCMTIME) == 0) in nfsd4_finalize_deleg_timestamps()
1235 spin_lock(&f->f_lock); in nfsd4_finalize_deleg_timestamps()
1236 f->f_mode &= ~FMODE_NOCMTIME; in nfsd4_finalize_deleg_timestamps()
1237 spin_unlock(&f->f_lock); in nfsd4_finalize_deleg_timestamps()
1240 if (!dp->dl_written) in nfsd4_finalize_deleg_timestamps()
1244 if (dp->dl_setattr) in nfsd4_finalize_deleg_timestamps()
1249 ret = notify_change(&nop_mnt_idmap, f->f_path.dentry, &ia, NULL); in nfsd4_finalize_deleg_timestamps()
1255 MAJOR(inode->i_sb->s_dev), in nfsd4_finalize_deleg_timestamps()
1256 MINOR(inode->i_sb->s_dev), in nfsd4_finalize_deleg_timestamps()
1257 inode->i_ino, ret); in nfsd4_finalize_deleg_timestamps()
1263 struct nfs4_file *fp = dp->dl_stid.sc_file; in nfs4_unlock_deleg_lease()
1264 struct nfsd_file *nf = fp->fi_deleg_file; in nfs4_unlock_deleg_lease()
1266 WARN_ON_ONCE(!fp->fi_delegees); in nfs4_unlock_deleg_lease()
1268 nfsd4_finalize_deleg_timestamps(dp, nf->nf_file); in nfs4_unlock_deleg_lease()
1269 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); in nfs4_unlock_deleg_lease()
1275 put_clnt_odstate(dp->dl_clnt_odstate); in destroy_unhashed_deleg()
1277 nfs4_put_stid(&dp->dl_stid); in destroy_unhashed_deleg()
1281 * nfs4_delegation_exists - Discover if this delegation already exists
1296 lockdep_assert_held(&fp->fi_lock); in nfs4_delegation_exists()
1298 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { in nfs4_delegation_exists()
1299 searchclp = searchdp->dl_stid.sc_client; in nfs4_delegation_exists()
1308 * hash_delegation_locked - Add a delegation to the appropriate lists
1315 * On error: -EAGAIN if one was previously granted to this
1323 struct nfs4_client *clp = dp->dl_stid.sc_client; in hash_delegation_locked()
1326 lockdep_assert_held(&fp->fi_lock); in hash_delegation_locked()
1327 lockdep_assert_held(&clp->cl_lock); in hash_delegation_locked()
1330 return -EAGAIN; in hash_delegation_locked()
1331 refcount_inc(&dp->dl_stid.sc_count); in hash_delegation_locked()
1332 dp->dl_stid.sc_type = SC_TYPE_DELEG; in hash_delegation_locked()
1333 list_add(&dp->dl_perfile, &fp->fi_delegations); in hash_delegation_locked()
1334 list_add(&dp->dl_perclnt, &clp->cl_delegations); in hash_delegation_locked()
1340 return !(list_empty(&dp->dl_perfile)); in delegation_hashed()
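As the 1308/1315 kernel-doc notes, hash_delegation_locked() returns -EAGAIN if the client already holds a delegation on the file, which nfs4_delegation_exists() detects by scanning fp->fi_delegations. A compact standalone model of that check-then-insert, with illustrative types:

/* "One delegation per (client, file)": scan the file's delegation list for
 * the client before linking a new delegation; return -EAGAIN on a duplicate. */
#include <errno.h>

struct deleg {
    struct deleg *next_on_file;
    int client_id;
};

struct file_delegs {
    struct deleg *head;
};

static int delegation_exists(const struct file_delegs *fp, int client_id)
{
    for (const struct deleg *d = fp->head; d; d = d->next_on_file)
        if (d->client_id == client_id)
            return 1;
    return 0;
}

/* Caller is assumed to hold the per-file and per-client locks. */
static int hash_delegation(struct file_delegs *fp, struct deleg *dp)
{
    if (delegation_exists(fp, dp->client_id))
        return -EAGAIN;
    dp->next_on_file = fp->head;
    fp->head = dp;
    return 0;
}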
1346 struct nfs4_file *fp = dp->dl_stid.sc_file; in unhash_delegation_locked()
1354 dp->dl_stid.sc_client->cl_minorversion == 0) in unhash_delegation_locked()
1356 dp->dl_stid.sc_status |= statusmask; in unhash_delegation_locked()
1358 atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked); in unhash_delegation_locked()
1361 ++dp->dl_time; in unhash_delegation_locked()
1362 spin_lock(&fp->fi_lock); in unhash_delegation_locked()
1363 list_del_init(&dp->dl_perclnt); in unhash_delegation_locked()
1364 list_del_init(&dp->dl_recall_lru); in unhash_delegation_locked()
1365 list_del_init(&dp->dl_perfile); in unhash_delegation_locked()
1366 spin_unlock(&fp->fi_lock); in unhash_delegation_locked()
1382 * revoke_delegation - perform nfs4 delegation structure cleanup
1401 * in the revocation process is protected by the clp->cl_lock.
1405 struct nfs4_client *clp = dp->dl_stid.sc_client; in revoke_delegation()
1407 WARN_ON(!list_empty(&dp->dl_recall_lru)); in revoke_delegation()
1408 WARN_ON_ONCE(dp->dl_stid.sc_client->cl_minorversion > 0 && in revoke_delegation()
1409 !(dp->dl_stid.sc_status & in revoke_delegation()
1412 trace_nfsd_stid_revoke(&dp->dl_stid); in revoke_delegation()
1414 spin_lock(&clp->cl_lock); in revoke_delegation()
1415 if (dp->dl_stid.sc_status & SC_STATUS_FREED) { in revoke_delegation()
1416 list_del_init(&dp->dl_recall_lru); in revoke_delegation()
1419 list_add(&dp->dl_recall_lru, &clp->cl_revoked); in revoke_delegation()
1420 dp->dl_stid.sc_status |= SC_STATUS_FREEABLE; in revoke_delegation()
1422 spin_unlock(&clp->cl_lock); in revoke_delegation()
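The revoke_delegation() fragment above shows the cl_lock-protected step: if the client already freed the stateid (SC_STATUS_FREED) the delegation is simply dropped from the recall list, otherwise it is parked on clp->cl_revoked and marked SC_STATUS_FREEABLE so a later FREE_STATEID can reap it. A rough single-threaded model of that branch; flags and list handling are illustrative only:

/* Illustrative model of the revoked-delegation branch: either the client
 * already freed the stateid, or it is parked on the per-client revoked list. */
struct deleg_stid {
    unsigned int status;
    struct deleg_stid *next_revoked;
};

struct revoked_list {
    struct deleg_stid *head;
};

#define ST_FREED    0x1
#define ST_FREEABLE 0x2

static void revoke_one(struct revoked_list *cl_revoked, struct deleg_stid *dp)
{
    /* clp->cl_lock would be held around this block in the kernel */
    if (dp->status & ST_FREED) {
        dp->next_revoked = NULL;          /* client freed it first: drop it */
    } else {
        dp->next_revoked = cl_revoked->head;
        cl_revoked->head = dp;            /* keep it visible until FREE_STATEID */
        dp->status |= ST_FREEABLE;
    }
}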
1450 spin_lock(&fp->fi_lock); in recalculate_deny_mode()
1451 old_deny = fp->fi_share_deny; in recalculate_deny_mode()
1452 fp->fi_share_deny = 0; in recalculate_deny_mode()
1453 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { in recalculate_deny_mode()
1454 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); in recalculate_deny_mode()
1455 if (fp->fi_share_deny == old_deny) in recalculate_deny_mode()
1458 spin_unlock(&fp->fi_lock); in recalculate_deny_mode()
1474 /* Recalculate per-file deny mode if there was a change */ in reset_union_bmap_deny()
1476 recalculate_deny_mode(stp->st_stid.sc_file); in reset_union_bmap_deny()
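recalculate_deny_mode(), called from reset_union_bmap_deny() when a deny bit may have been dropped (see the 1474 comment), rebuilds fi_share_deny as the OR of the remaining stateids' deny modes and stops early once the union reaches the old value, since it can never exceed it. A standalone sketch of that recomputation:

/* Recompute a file's union deny mode from its stateids, stopping early if
 * the union already equals the old value (it cannot grow past it). */
struct stateid_deny {
    unsigned int deny_bits;
};

static unsigned int recalc_deny(const struct stateid_deny *stids, int n,
                                unsigned int old_deny)
{
    unsigned int deny = 0;

    for (int i = 0; i < n; i++) {
        deny |= stids[i].deny_bits;
        if (deny == old_deny)   /* nothing was actually released */
            break;
    }
    return deny;
}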
1484 struct nfs4_file *fp = stp->st_stid.sc_file; in release_all_access()
1486 if (fp && stp->st_deny_bmap != 0) in release_all_access()
1491 nfs4_file_put_access(stp->st_stid.sc_file, i); in release_all_access()
1498 kfree(sop->so_owner.data); in nfs4_free_stateowner()
1499 sop->so_ops->so_free(sop); in nfs4_free_stateowner()
1504 struct nfs4_client *clp = sop->so_client; in nfs4_put_stateowner()
1506 might_lock(&clp->cl_lock); in nfs4_put_stateowner()
1508 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) in nfs4_put_stateowner()
1510 sop->so_ops->so_unhash(sop); in nfs4_put_stateowner()
1511 spin_unlock(&clp->cl_lock); in nfs4_put_stateowner()
1518 return list_empty(&stp->st_perfile); in nfs4_ol_stateid_unhashed()
1523 struct nfs4_file *fp = stp->st_stid.sc_file; in unhash_ol_stateid()
1525 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); in unhash_ol_stateid()
1527 if (list_empty(&stp->st_perfile)) in unhash_ol_stateid()
1530 spin_lock(&fp->fi_lock); in unhash_ol_stateid()
1531 list_del_init(&stp->st_perfile); in unhash_ol_stateid()
1532 spin_unlock(&fp->fi_lock); in unhash_ol_stateid()
1533 list_del(&stp->st_perstateowner); in unhash_ol_stateid()
1541 put_clnt_odstate(stp->st_clnt_odstate); in nfs4_free_ol_stateid()
1543 if (stp->st_stateowner) in nfs4_free_ol_stateid()
1544 nfs4_put_stateowner(stp->st_stateowner); in nfs4_free_ol_stateid()
1545 WARN_ON(!list_empty(&stid->sc_cp_list)); in nfs4_free_ol_stateid()
1552 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); in nfs4_free_lock_stateid()
1555 nf = find_any_file(stp->st_stid.sc_file); in nfs4_free_lock_stateid()
1557 get_file(nf->nf_file); in nfs4_free_lock_stateid()
1558 filp_close(nf->nf_file, (fl_owner_t)lo); in nfs4_free_lock_stateid()
1572 struct nfs4_stid *s = &stp->st_stid; in put_ol_stateid_locked()
1573 struct nfs4_client *clp = s->sc_client; in put_ol_stateid_locked()
1575 lockdep_assert_held(&clp->cl_lock); in put_ol_stateid_locked()
1577 WARN_ON_ONCE(!list_empty(&stp->st_locks)); in put_ol_stateid_locked()
1579 if (!refcount_dec_and_test(&s->sc_count)) { in put_ol_stateid_locked()
1584 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); in put_ol_stateid_locked()
1585 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) in put_ol_stateid_locked()
1586 atomic_dec(&s->sc_client->cl_admin_revoked); in put_ol_stateid_locked()
1587 list_add(&stp->st_locks, reaplist); in put_ol_stateid_locked()
1592 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); in unhash_lock_stateid()
1596 list_del_init(&stp->st_locks); in unhash_lock_stateid()
1597 stp->st_stid.sc_status |= SC_STATUS_CLOSED; in unhash_lock_stateid()
1603 struct nfs4_client *clp = stp->st_stid.sc_client; in release_lock_stateid()
1606 spin_lock(&clp->cl_lock); in release_lock_stateid()
1608 spin_unlock(&clp->cl_lock); in release_lock_stateid()
1610 nfs4_put_stid(&stp->st_stid); in release_lock_stateid()
1615 struct nfs4_client *clp = lo->lo_owner.so_client; in unhash_lockowner_locked()
1617 lockdep_assert_held(&clp->cl_lock); in unhash_lockowner_locked()
1619 list_del_init(&lo->lo_owner.so_strhash); in unhash_lockowner_locked()
1637 list_del(&stp->st_locks); in free_ol_stateid_reaplist()
1638 fp = stp->st_stid.sc_file; in free_ol_stateid_reaplist()
1639 stp->st_stid.sc_free(&stp->st_stid); in free_ol_stateid_reaplist()
1650 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); in release_open_stateid_locks()
1652 while (!list_empty(&open_stp->st_locks)) { in release_open_stateid_locks()
1653 stp = list_entry(open_stp->st_locks.next, in release_open_stateid_locks()
1663 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); in unhash_open_stateid()
1675 spin_lock(&stp->st_stid.sc_client->cl_lock); in release_open_stateid()
1676 stp->st_stid.sc_status |= SC_STATUS_CLOSED; in release_open_stateid()
1679 spin_unlock(&stp->st_stid.sc_client->cl_lock); in release_open_stateid()
1685 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); in nfs4_openowner_unhashed()
1687 return list_empty(&oo->oo_owner.so_strhash) && in nfs4_openowner_unhashed()
1688 list_empty(&oo->oo_perclient); in nfs4_openowner_unhashed()
1693 struct nfs4_client *clp = oo->oo_owner.so_client; in unhash_openowner_locked()
1695 lockdep_assert_held(&clp->cl_lock); in unhash_openowner_locked()
1697 list_del_init(&oo->oo_owner.so_strhash); in unhash_openowner_locked()
1698 list_del_init(&oo->oo_perclient); in unhash_openowner_locked()
1703 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, in release_last_closed_stateid()
1707 spin_lock(&nn->client_lock); in release_last_closed_stateid()
1708 s = oo->oo_last_closed_stid; in release_last_closed_stateid()
1710 list_del_init(&oo->oo_close_lru); in release_last_closed_stateid()
1711 oo->oo_last_closed_stid = NULL; in release_last_closed_stateid()
1713 spin_unlock(&nn->client_lock); in release_last_closed_stateid()
1715 nfs4_put_stid(&s->st_stid); in release_last_closed_stateid()
1721 struct nfs4_client *clp = oo->oo_owner.so_client; in release_openowner()
1724 spin_lock(&clp->cl_lock); in release_openowner()
1726 while (!list_empty(&oo->oo_owner.so_stateids)) { in release_openowner()
1727 stp = list_first_entry(&oo->oo_owner.so_stateids, in release_openowner()
1732 spin_unlock(&clp->cl_lock); in release_openowner()
1735 nfs4_put_stateowner(&oo->oo_owner); in release_openowner()
1745 spin_lock(&clp->cl_lock); in find_one_sb_stid()
1746 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id) in find_one_sb_stid()
1747 if ((stid->sc_type & sc_types) && in find_one_sb_stid()
1748 stid->sc_status == 0 && in find_one_sb_stid()
1749 stid->sc_file->fi_inode->i_sb == sb) { in find_one_sb_stid()
1750 refcount_inc(&stid->sc_count); in find_one_sb_stid()
1753 spin_unlock(&clp->cl_lock); in find_one_sb_stid()
1758 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
1769 * states have been "admin-revoked".
1779 spin_lock(&nn->client_lock); in nfsd4_revoke_states()
1781 struct list_head *head = &nn->conf_id_hashtbl[idhashval]; in nfsd4_revoke_states()
1792 spin_unlock(&nn->client_lock); in nfsd4_revoke_states()
1793 switch (stid->sc_type) { in nfsd4_revoke_states()
1796 mutex_lock_nested(&stp->st_mutex, in nfsd4_revoke_states()
1799 spin_lock(&clp->cl_lock); in nfsd4_revoke_states()
1800 if (stid->sc_status == 0) { in nfsd4_revoke_states()
1801 stid->sc_status |= in nfsd4_revoke_states()
1803 atomic_inc(&clp->cl_admin_revoked); in nfsd4_revoke_states()
1804 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1807 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1808 mutex_unlock(&stp->st_mutex); in nfsd4_revoke_states()
1812 mutex_lock_nested(&stp->st_mutex, in nfsd4_revoke_states()
1814 spin_lock(&clp->cl_lock); in nfsd4_revoke_states()
1815 if (stid->sc_status == 0) { in nfsd4_revoke_states()
1817 lockowner(stp->st_stateowner); in nfsd4_revoke_states()
1820 stid->sc_status |= in nfsd4_revoke_states()
1822 atomic_inc(&clp->cl_admin_revoked); in nfsd4_revoke_states()
1823 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1824 nf = find_any_file(stp->st_stid.sc_file); in nfsd4_revoke_states()
1826 get_file(nf->nf_file); in nfsd4_revoke_states()
1827 filp_close(nf->nf_file, in nfsd4_revoke_states()
1833 spin_unlock(&clp->cl_lock); in nfsd4_revoke_states()
1834 mutex_unlock(&stp->st_mutex); in nfsd4_revoke_states()
1837 refcount_inc(&stid->sc_count); in nfsd4_revoke_states()
1853 spin_lock(&nn->client_lock); in nfsd4_revoke_states()
1854 if (clp->cl_minorversion == 0) in nfsd4_revoke_states()
1860 nn->nfs40_last_revoke = in nfsd4_revoke_states()
1866 spin_unlock(&nn->client_lock); in nfsd4_revoke_states()
1874 return sid->sequence % SESSION_HASH_SIZE; in hash_sessionid()
1881 u32 *ptr = (u32 *)(&sessionid->data[0]); in dump_sessionid()
1892 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1897 struct nfs4_stateowner *so = cstate->replay_owner; in nfsd4_bump_seqid()
1908 if (so->so_is_open_owner) in nfsd4_bump_seqid()
1910 so->so_seqid++; in nfsd4_bump_seqid()
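nfsd4_bump_seqid() applies the NFSv4.0 seqid rules hinted at by the 1892 comment: a reply served from the replay cache leaves the owner untouched, a non-seqid-mutating error does not advance so_seqid, and everything else bumps it (an open owner also drops its cached last-closed stateid). A simplified standalone version; the replay and non-mutating cases are assumptions based on the general protocol rules, and the error set shown is only a subset:

/* Simplified seqid-bump decision; error names and the "mutating" test are
 * stand-ins for the kernel's full seqid_mutating_err() list. */
#include <stdbool.h>

enum nfs_err { NFS_OK = 0, ERR_REPLAY_ME, ERR_BAD_SEQID, ERR_STALE_CLIENTID, ERR_OTHER };

struct stateowner_model {
    unsigned int seqid;
    bool is_open_owner;
};

static bool seqid_mutating(enum nfs_err err)
{
    switch (err) {
    case ERR_BAD_SEQID:
    case ERR_STALE_CLIENTID:    /* examples of non-mutating errors */
        return false;
    default:
        return true;
    }
}

static void bump_seqid(struct stateowner_model *so, enum nfs_err err)
{
    if (err == ERR_REPLAY_ME)   /* reply came from the replay cache */
        return;
    if (!seqid_mutating(err))   /* client retries with the same seqid */
        return;
    if (!so)
        return;
    /* an open owner would also release its cached last-closed stateid here */
    so->seqid++;                /* normal case: advance the owner's seqid */
}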
1917 struct nfs4_client *clp = ses->se_client; in gen_sessionid()
1920 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; in gen_sessionid()
1921 sid->clientid = clp->cl_clientid; in gen_sessionid()
1922 sid->sequence = current_sessionid++; in gen_sessionid()
1923 sid->reserved = 0; in gen_sessionid()
1929 * the end of the initial SEQUENCE operation--the rest we regenerate
1935 * verifier), 12 for the compound header (with zero-length tag), and 44
1943 /* The sum of "target_slots-1" on every session. The shrinker can push this
1945 * be freed. The "-1" is because we can never free slot 0 while the

1955 if (from >= ses->se_fchannel.maxreqs) in free_session_slots()
1958 for (i = from; i < ses->se_fchannel.maxreqs; i++) { in free_session_slots()
1959 struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); in free_session_slots()
1966 xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0); in free_session_slots()
1967 free_svc_cred(&slot->sl_cred); in free_session_slots()
1970 ses->se_fchannel.maxreqs = from; in free_session_slots()
1971 if (ses->se_target_maxslots > from) { in free_session_slots()
1973 atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots); in free_session_slots()
1974 ses->se_target_maxslots = new_target; in free_session_slots()
1979 * reduce_session_slots - reduce the target max-slots of a session if possible
1983 * This interface can be used by a shrinker to reduce the target max-slots
1987 * best-effort, skipping a session if client_lock is already held has no
1996 struct nfsd_net *nn = net_generic(ses->se_client->net, in reduce_session_slots()
2000 if (ses->se_target_maxslots <= 1) in reduce_session_slots()
2002 if (!spin_trylock(&nn->client_lock)) in reduce_session_slots()
2004 ret = min(dec, ses->se_target_maxslots-1); in reduce_session_slots()
2005 ses->se_target_maxslots -= ret; in reduce_session_slots()
2007 ses->se_slot_gen += 1; in reduce_session_slots()
2008 if (ses->se_slot_gen == 0) { in reduce_session_slots()
2010 ses->se_slot_gen = 1; in reduce_session_slots()
2011 for (i = 0; i < ses->se_fchannel.maxreqs; i++) { in reduce_session_slots()
2012 struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); in reduce_session_slots()
2013 slot->sl_generation = 0; in reduce_session_slots()
2016 spin_unlock(&nn->client_lock); in reduce_session_slots()
2030 size = fattrs->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ ? in nfsd4_alloc_slot()
2031 0 : fattrs->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; in nfsd4_alloc_slot()
2036 slot->sl_index = index; in nfsd4_alloc_slot()
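nfsd4_alloc_slot() sizes each slot's reply cache as maxresp_cached minus a fixed header reserve (NFSD_MIN_HDR_SEQ_SZ), clamping to zero for small values. Per the 1929/1935 comment, the reserve covers the RPC reply header with NULL verifier, a zero-length-tag compound header (12 bytes) and the SEQUENCE reply (44 bytes); the 24-byte RPC header figure below is an assumption. A small worked example of the sizing:

/* Worked example of the cached-reply sizing in nfsd4_alloc_slot(): a slot
 * only caches bytes past the fixed headers, so tiny maxresp_cached values
 * degrade to a zero-length cache rather than underflowing. */
#include <stdio.h>

#define MIN_HDR_SEQ_SZ (24 + 12 + 44)   /* assumed fixed header reserve */

static unsigned int slot_cache_size(unsigned int maxresp_cached)
{
    return maxresp_cached < MIN_HDR_SEQ_SZ ? 0
                                           : maxresp_cached - MIN_HDR_SEQ_SZ;
}

int main(void)
{
    printf("maxresp_cached=1024 -> cache %u bytes\n", slot_cache_size(1024));
    printf("maxresp_cached=64   -> cache %u bytes\n", slot_cache_size(64));
    return 0;
}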
2043 int numslots = fattrs->maxreqs; in alloc_session()
2051 xa_init(&new->se_slots); in alloc_session()
2054 if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL))) in alloc_session()
2062 if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) { in alloc_session()
2067 fattrs->maxreqs = i; in alloc_session()
2068 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); in alloc_session()
2069 new->se_target_maxslots = i; in alloc_session()
2070 atomic_add(i - 1, &nfsd_total_target_slots); in alloc_session()
2071 new->se_cb_slot_avail = ~0U; in alloc_session()
2072 new->se_cb_highest_slot = min(battrs->maxreqs - 1, in alloc_session()
2073 NFSD_BC_SLOT_TABLE_SIZE - 1); in alloc_session()
2074 spin_lock_init(&new->se_lock); in alloc_session()
2078 xa_destroy(&new->se_slots); in alloc_session()
2085 svc_xprt_put(c->cn_xprt); in free_conn()
2092 struct nfs4_client *clp = c->cn_session->se_client; in nfsd4_conn_lost()
2096 spin_lock(&clp->cl_lock); in nfsd4_conn_lost()
2097 if (!list_empty(&c->cn_persession)) { in nfsd4_conn_lost()
2098 list_del(&c->cn_persession); in nfsd4_conn_lost()
2102 spin_unlock(&clp->cl_lock); in nfsd4_conn_lost()
2112 svc_xprt_get(rqstp->rq_xprt); in alloc_conn()
2113 conn->cn_xprt = rqstp->rq_xprt; in alloc_conn()
2114 conn->cn_flags = flags; in alloc_conn()
2115 INIT_LIST_HEAD(&conn->cn_xpt_user.list); in alloc_conn()
2121 conn->cn_session = ses; in __nfsd4_hash_conn()
2122 list_add(&conn->cn_persession, &ses->se_conns); in __nfsd4_hash_conn()
2127 struct nfs4_client *clp = ses->se_client; in nfsd4_hash_conn()
2129 spin_lock(&clp->cl_lock); in nfsd4_hash_conn()
2131 spin_unlock(&clp->cl_lock); in nfsd4_hash_conn()
2136 conn->cn_xpt_user.callback = nfsd4_conn_lost; in nfsd4_register_conn()
2137 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); in nfsd4_register_conn()
2148 nfsd4_conn_lost(&conn->cn_xpt_user); in nfsd4_init_conn()
2150 nfsd4_probe_callback_sync(ses->se_client); in nfsd4_init_conn()
2157 if (cses->flags & SESSION4_BACK_CHAN) in alloc_conn_from_crses()
2165 struct nfs4_client *clp = s->se_client; in nfsd4_del_conns()
2168 spin_lock(&clp->cl_lock); in nfsd4_del_conns()
2169 while (!list_empty(&s->se_conns)) { in nfsd4_del_conns()
2170 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); in nfsd4_del_conns()
2171 list_del_init(&c->cn_persession); in nfsd4_del_conns()
2172 spin_unlock(&clp->cl_lock); in nfsd4_del_conns()
2174 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); in nfsd4_del_conns()
2177 spin_lock(&clp->cl_lock); in nfsd4_del_conns()
2179 spin_unlock(&clp->cl_lock); in nfsd4_del_conns()
2185 xa_destroy(&ses->se_slots); in __free_session()
2214 if (scanned >= sc->nr_to_scan) { in nfsd_slot_scan()
2216 list_move(&nfsd_session_list, &ses->se_all_sessions); in nfsd_slot_scan()
2221 sc->nr_scanned = scanned; in nfsd_slot_scan()
2230 new->se_client = clp; in init_session()
2233 INIT_LIST_HEAD(&new->se_conns); in init_session()
2235 atomic_set(&new->se_ref, 0); in init_session()
2236 new->se_dead = false; in init_session()
2237 new->se_cb_prog = cses->callback_prog; in init_session()
2238 new->se_cb_sec = cses->cb_sec; in init_session()
2241 new->se_cb_seq_nr[idx] = 1; in init_session()
2243 idx = hash_sessionid(&new->se_sessionid); in init_session()
2244 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); in init_session()
2245 spin_lock(&clp->cl_lock); in init_session()
2246 list_add(&new->se_perclnt, &clp->cl_sessions); in init_session()
2247 spin_unlock(&clp->cl_lock); in init_session()
2250 list_add_tail(&new->se_all_sessions, &nfsd_session_list); in init_session()
2262 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); in init_session()
2263 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); in init_session()
2275 lockdep_assert_held(&nn->client_lock); in __find_in_sessionid_hashtbl()
2280 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { in __find_in_sessionid_hashtbl()
2281 if (!memcmp(elem->se_sessionid.data, sessionid->data, in __find_in_sessionid_hashtbl()
2313 struct nfs4_client *clp = ses->se_client; in unhash_session()
2314 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_session()
2316 lockdep_assert_held(&nn->client_lock); in unhash_session()
2318 list_del(&ses->se_hash); in unhash_session()
2319 spin_lock(&ses->se_client->cl_lock); in unhash_session()
2320 list_del(&ses->se_perclnt); in unhash_session()
2321 spin_unlock(&ses->se_client->cl_lock); in unhash_session()
2323 list_del(&ses->se_all_sessions); in unhash_session()
2336 if (clid->cl_boot == (u32)nn->boot_time) in STALE_CLIENTID()
2348 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients && in alloc_client()
2349 atomic_read(&nn->nfsd_courtesy_clients) > 0) in alloc_client()
2350 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in alloc_client()
2355 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); in alloc_client()
2356 if (clp->cl_name.data == NULL) in alloc_client()
2358 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, in alloc_client()
2361 if (!clp->cl_ownerstr_hashtbl) in alloc_client()
2363 clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0); in alloc_client()
2364 if (!clp->cl_callback_wq) in alloc_client()
2368 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); in alloc_client()
2369 INIT_LIST_HEAD(&clp->cl_sessions); in alloc_client()
2370 idr_init(&clp->cl_stateids); in alloc_client()
2371 atomic_set(&clp->cl_rpc_users, 0); in alloc_client()
2372 clp->cl_cb_state = NFSD4_CB_UNKNOWN; in alloc_client()
2373 clp->cl_state = NFSD4_ACTIVE; in alloc_client()
2374 atomic_inc(&nn->nfs4_client_count); in alloc_client()
2375 atomic_set(&clp->cl_delegs_in_recall, 0); in alloc_client()
2376 INIT_LIST_HEAD(&clp->cl_idhash); in alloc_client()
2377 INIT_LIST_HEAD(&clp->cl_openowners); in alloc_client()
2378 INIT_LIST_HEAD(&clp->cl_delegations); in alloc_client()
2379 INIT_LIST_HEAD(&clp->cl_lru); in alloc_client()
2380 INIT_LIST_HEAD(&clp->cl_revoked); in alloc_client()
2382 INIT_LIST_HEAD(&clp->cl_lo_states); in alloc_client()
2384 INIT_LIST_HEAD(&clp->async_copies); in alloc_client()
2385 spin_lock_init(&clp->async_lock); in alloc_client()
2386 spin_lock_init(&clp->cl_lock); in alloc_client()
2387 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); in alloc_client()
2390 kfree(clp->cl_ownerstr_hashtbl); in alloc_client()
2392 kfree(clp->cl_name.data); in alloc_client()
2403 free_svc_cred(&clp->cl_cred); in __free_client()
2404 destroy_workqueue(clp->cl_callback_wq); in __free_client()
2405 kfree(clp->cl_ownerstr_hashtbl); in __free_client()
2406 kfree(clp->cl_name.data); in __free_client()
2407 kfree(clp->cl_nii_domain.data); in __free_client()
2408 kfree(clp->cl_nii_name.data); in __free_client()
2409 idr_destroy(&clp->cl_stateids); in __free_client()
2410 kfree(clp->cl_ra); in __free_client()
2416 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); in drop_client()
2422 while (!list_empty(&clp->cl_sessions)) { in free_client()
2424 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, in free_client()
2426 list_del(&ses->se_perclnt); in free_client()
2427 WARN_ON_ONCE(atomic_read(&ses->se_ref)); in free_client()
2430 rpc_destroy_wait_queue(&clp->cl_cb_waitq); in free_client()
2431 if (clp->cl_nfsd_dentry) { in free_client()
2432 nfsd_client_rmdir(clp->cl_nfsd_dentry); in free_client()
2433 clp->cl_nfsd_dentry = NULL; in free_client()
2443 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_client_locked()
2446 lockdep_assert_held(&nn->client_lock); in unhash_client_locked()
2449 clp->cl_time = 0; in unhash_client_locked()
2451 if (!list_empty(&clp->cl_idhash)) { in unhash_client_locked()
2452 list_del_init(&clp->cl_idhash); in unhash_client_locked()
2453 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) in unhash_client_locked()
2454 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); in unhash_client_locked()
2456 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); in unhash_client_locked()
2458 list_del_init(&clp->cl_lru); in unhash_client_locked()
2459 spin_lock(&clp->cl_lock); in unhash_client_locked()
2461 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) { in unhash_client_locked()
2462 list_del_init(&ses->se_hash); in unhash_client_locked()
2463 list_del_init(&ses->se_all_sessions); in unhash_client_locked()
2466 spin_unlock(&clp->cl_lock); in unhash_client_locked()
2472 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in unhash_client()
2474 spin_lock(&nn->client_lock); in unhash_client()
2476 spin_unlock(&nn->client_lock); in unhash_client()
2481 int users = atomic_read(&clp->cl_rpc_users); in mark_client_expired_locked()
2494 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in __destroy_client()
2501 while (!list_empty(&clp->cl_delegations)) { in __destroy_client()
2502 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); in __destroy_client()
2504 list_add(&dp->dl_recall_lru, &reaplist); in __destroy_client()
2509 list_del_init(&dp->dl_recall_lru); in __destroy_client()
2512 while (!list_empty(&clp->cl_revoked)) { in __destroy_client()
2513 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); in __destroy_client()
2514 list_del_init(&dp->dl_recall_lru); in __destroy_client()
2515 nfs4_put_stid(&dp->dl_stid); in __destroy_client()
2517 while (!list_empty(&clp->cl_openowners)) { in __destroy_client()
2518 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); in __destroy_client()
2519 nfs4_get_stateowner(&oo->oo_owner); in __destroy_client()
2525 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], in __destroy_client()
2528 WARN_ON_ONCE(so->so_is_open_owner); in __destroy_client()
2535 if (clp->cl_cb_conn.cb_xprt) in __destroy_client()
2536 svc_xprt_put(clp->cl_cb_conn.cb_xprt); in __destroy_client()
2537 atomic_add_unless(&nn->nfs4_client_count, -1, 0); in __destroy_client()
2552 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in inc_reclaim_complete()
2554 if (!nn->track_reclaim_completes) in inc_reclaim_complete()
2556 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) in inc_reclaim_complete()
2558 if (atomic_inc_return(&nn->nr_reclaim_complete) == in inc_reclaim_complete()
2559 nn->reclaim_str_hashtbl_size) { in inc_reclaim_complete()
2561 clp->net->ns.inum); in inc_reclaim_complete()
2575 memcpy(target->cl_verifier.data, source->data, in copy_verf()
2576 sizeof(target->cl_verifier.data)); in copy_verf()
2581 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; in copy_clid()
2582 target->cl_clientid.cl_id = source->cl_clientid.cl_id; in copy_clid()
2587 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); in copy_cred()
2588 target->cr_raw_principal = kstrdup(source->cr_raw_principal, in copy_cred()
2590 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); in copy_cred()
2591 if ((source->cr_principal && !target->cr_principal) || in copy_cred()
2592 (source->cr_raw_principal && !target->cr_raw_principal) || in copy_cred()
2593 (source->cr_targ_princ && !target->cr_targ_princ)) in copy_cred()
2594 return -ENOMEM; in copy_cred()
2596 target->cr_flavor = source->cr_flavor; in copy_cred()
2597 target->cr_uid = source->cr_uid; in copy_cred()
2598 target->cr_gid = source->cr_gid; in copy_cred()
2599 target->cr_group_info = source->cr_group_info; in copy_cred()
2600 get_group_info(target->cr_group_info); in copy_cred()
2601 target->cr_gss_mech = source->cr_gss_mech; in copy_cred()
2602 if (source->cr_gss_mech) in copy_cred()
2603 gss_mech_get(source->cr_gss_mech); in copy_cred()
2610 if (o1->len < o2->len) in compare_blob()
2611 return -1; in compare_blob()
2612 if (o1->len > o2->len) in compare_blob()
2614 return memcmp(o1->data, o2->data, o1->len); in compare_blob()
2620 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); in same_verf()
2626 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); in same_clid()
2633 if (g1->ngroups != g2->ngroups) in groups_equal()
2635 for (i=0; i<g1->ngroups; i++) in groups_equal()
2636 if (!gid_eq(g1->gid[i], g2->gid[i])) in groups_equal()
2645 * approximation. We also don't want to allow non-gss use of a client
2653 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); in is_gss_cred()
2661 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) in same_creds()
2662 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) in same_creds()
2663 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) in same_creds()
2666 if (cr1->cr_principal == cr2->cr_principal) in same_creds()
2668 if (!cr1->cr_principal || !cr2->cr_principal) in same_creds()
2670 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); in same_creds()
2675 struct svc_cred *cr = &rqstp->rq_cred; in svc_rqst_integrity_protected()
2678 if (!cr->cr_gss_mech) in svc_rqst_integrity_protected()
2680 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); in svc_rqst_integrity_protected()
2687 struct svc_cred *cr = &rqstp->rq_cred; in nfsd4_mach_creds_match()
2689 if (!cl->cl_mach_cred) in nfsd4_mach_creds_match()
2691 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) in nfsd4_mach_creds_match()
2695 if (cl->cl_cred.cr_raw_principal) in nfsd4_mach_creds_match()
2696 return 0 == strcmp(cl->cl_cred.cr_raw_principal, in nfsd4_mach_creds_match()
2697 cr->cr_raw_principal); in nfsd4_mach_creds_match()
2698 if (!cr->cr_principal) in nfsd4_mach_creds_match()
2700 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); in nfsd4_mach_creds_match()
2708 * This is opaque to client, so no need to byte-swap. Use in gen_confirm()
2712 verf[1] = (__force __be32)nn->clverifier_counter++; in gen_confirm()
2713 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); in gen_confirm()
2718 clp->cl_clientid.cl_boot = (u32)nn->boot_time; in gen_clid()
2719 clp->cl_clientid.cl_id = nn->clientid_counter++; in gen_clid()
2728 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); in find_stateid_locked()
2729 if (!ret || !ret->sc_type) in find_stateid_locked()
2740 spin_lock(&cl->cl_lock); in find_stateid_by_type()
2743 if ((s->sc_status & ~ok_states) == 0 && in find_stateid_by_type()
2744 (typemask & s->sc_type)) in find_stateid_by_type()
2745 refcount_inc(&s->sc_count); in find_stateid_by_type()
2749 spin_unlock(&cl->cl_lock); in find_stateid_by_type()
2786 struct inode *inode = file_inode(m->file); in client_info_show()
2793 return -ENXIO; in client_info_show()
2794 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); in client_info_show()
2796 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); in client_info_show()
2798 if (clp->cl_state == NFSD4_COURTESY) in client_info_show()
2800 else if (clp->cl_state == NFSD4_EXPIRABLE) in client_info_show()
2802 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) in client_info_show()
2807 ktime_get_boottime_seconds() - clp->cl_time); in client_info_show()
2809 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); in client_info_show()
2810 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); in client_info_show()
2811 if (clp->cl_nii_domain.data) { in client_info_show()
2813 seq_quote_mem(m, clp->cl_nii_domain.data, in client_info_show()
2814 clp->cl_nii_domain.len); in client_info_show()
2816 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); in client_info_show()
2818 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); in client_info_show()
2820 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); in client_info_show()
2821 seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr); in client_info_show()
2822 seq_printf(m, "admin-revoked states: %d\n", in client_info_show()
2823 atomic_read(&clp->cl_admin_revoked)); in client_info_show()
2824 spin_lock(&clp->cl_lock); in client_info_show()
2826 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) in client_info_show()
2827 seq_printf(m, " %u", ses->se_fchannel.maxreqs); in client_info_show()
2829 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) in client_info_show()
2830 seq_printf(m, " %u", ses->se_target_maxslots); in client_info_show()
2831 spin_unlock(&clp->cl_lock); in client_info_show()
2842 __acquires(&clp->cl_lock) in states_start()
2844 struct nfs4_client *clp = s->private; in states_start()
2848 spin_lock(&clp->cl_lock); in states_start()
2849 ret = idr_get_next_ul(&clp->cl_stateids, &id); in states_start()
2856 struct nfs4_client *clp = s->private; in states_next()
2862 ret = idr_get_next_ul(&clp->cl_stateids, &id); in states_next()
2868 __releases(&clp->cl_lock) in states_stop()
2870 struct nfs4_client *clp = s->private; in states_stop()
2872 spin_unlock(&clp->cl_lock); in states_stop()
2877 seq_printf(s, "filename: \"%pD2\"", f->nf_file); in nfs4_show_fname()
2882 struct inode *inode = file_inode(f->nf_file); in nfs4_show_superblock()
2885 MAJOR(inode->i_sb->s_dev), in nfs4_show_superblock()
2886 MINOR(inode->i_sb->s_dev), in nfs4_show_superblock()
2887 inode->i_ino); in nfs4_show_superblock()
2893 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); in nfs4_show_owner()
2898 seq_printf(s, "0x%.8x", stid->si_generation); in nfs4_show_stateid()
2899 seq_printf(s, "%12phN", &stid->si_opaque); in nfs4_show_stateid()
2911 oo = ols->st_stateowner; in nfs4_show_open()
2912 nf = st->sc_file; in nfs4_show_open()
2914 seq_puts(s, "- "); in nfs4_show_open()
2915 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_open()
2918 access = bmap_to_share_mode(ols->st_access_bmap); in nfs4_show_open()
2919 deny = bmap_to_share_mode(ols->st_deny_bmap); in nfs4_show_open()
2922 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", in nfs4_show_open()
2923 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); in nfs4_show_open()
2925 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", in nfs4_show_open()
2926 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); in nfs4_show_open()
2929 spin_lock(&nf->fi_lock); in nfs4_show_open()
2937 spin_unlock(&nf->fi_lock); in nfs4_show_open()
2941 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_open()
2942 seq_puts(s, ", admin-revoked"); in nfs4_show_open()
2955 oo = ols->st_stateowner; in nfs4_show_lock()
2956 nf = st->sc_file; in nfs4_show_lock()
2958 seq_puts(s, "- "); in nfs4_show_lock()
2959 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_lock()
2962 spin_lock(&nf->fi_lock); in nfs4_show_lock()
2979 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_lock()
2980 seq_puts(s, ", admin-revoked"); in nfs4_show_lock()
2982 spin_unlock(&nf->fi_lock); in nfs4_show_lock()
3008 nf = st->sc_file; in nfs4_show_deleg()
3010 seq_puts(s, "- "); in nfs4_show_deleg()
3011 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_deleg()
3014 seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type)); in nfs4_show_deleg()
3018 spin_lock(&nf->fi_lock); in nfs4_show_deleg()
3019 file = nf->fi_deleg_file; in nfs4_show_deleg()
3026 spin_unlock(&nf->fi_lock); in nfs4_show_deleg()
3027 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_deleg()
3028 seq_puts(s, ", admin-revoked"); in nfs4_show_deleg()
3040 seq_puts(s, "- "); in nfs4_show_layout()
3041 nfs4_show_stateid(s, &st->sc_stateid); in nfs4_show_layout()
3046 spin_lock(&ls->ls_stid.sc_file->fi_lock); in nfs4_show_layout()
3047 file = ls->ls_file; in nfs4_show_layout()
3054 spin_unlock(&ls->ls_stid.sc_file->fi_lock); in nfs4_show_layout()
3055 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) in nfs4_show_layout()
3056 seq_puts(s, ", admin-revoked"); in nfs4_show_layout()
3066 switch (st->sc_type) { in states_show()
3096 return -ENXIO; in client_states_open()
3101 s = file->private_data; in client_states_open()
3102 s->private = clp; in client_states_open()
3108 struct seq_file *m = file->private_data; in client_opens_release()
3109 struct nfs4_client *clp = m->private; in client_opens_release()
3131 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in force_expire_client()
3134 trace_nfsd_clid_admin_expired(&clp->cl_clientid); in force_expire_client()
3136 spin_lock(&nn->client_lock); in force_expire_client()
3137 clp->cl_time = 0; in force_expire_client()
3138 spin_unlock(&nn->client_lock); in force_expire_client()
3140 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); in force_expire_client()
3141 spin_lock(&nn->client_lock); in force_expire_client()
3142 already_expired = list_empty(&clp->cl_lru); in force_expire_client()
3145 spin_unlock(&nn->client_lock); in force_expire_client()
3150 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); in force_expire_client()
3163 return -EINVAL; in client_ctl_write()
3166 return -ENXIO; in client_ctl_write()
3189 switch (task->tk_status) { in nfsd4_cb_recall_any_done()
3190 case -NFS4ERR_DELAY: in nfsd4_cb_recall_any_done()
3201 struct nfs4_client *clp = cb->cb_clp; in nfsd4_cb_recall_any_release()
3214 trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task); in nfsd4_cb_getattr_done()
3215 ncf->ncf_cb_status = task->tk_status; in nfsd4_cb_getattr_done()
3216 switch (task->tk_status) { in nfsd4_cb_getattr_done()
3217 case -NFS4ERR_DELAY: in nfsd4_cb_getattr_done()
3233 nfs4_put_stid(&dp->dl_stid); in nfsd4_cb_getattr_release()
3253 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ncf->ncf_getattr.cb_flags)) in nfs4_cb_getattr()
3257 ncf->ncf_cb_status = NFS4ERR_IO; in nfs4_cb_getattr()
3260 set_bit(NFSD4_CALLBACK_WAKE, &ncf->ncf_getattr.cb_flags); in nfs4_cb_getattr()
3262 refcount_inc(&dp->dl_stid.sc_count); in nfs4_cb_getattr()
3263 nfsd4_run_cb(&ncf->ncf_getattr); in nfs4_cb_getattr()
3280 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); in create_client()
3286 kref_init(&clp->cl_nfsdfs.cl_ref); in create_client()
3287 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); in create_client()
3288 clp->cl_time = ktime_get_boottime_seconds(); in create_client()
3290 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); in create_client()
3291 clp->cl_cb_session = NULL; in create_client()
3292 clp->net = net; in create_client()
3293 clp->cl_nfsd_dentry = nfsd_client_mkdir( in create_client()
3294 nn, &clp->cl_nfsdfs, in create_client()
3295 clp->cl_clientid.cl_id - nn->clientid_base, in create_client()
3297 clp->cl_nfsd_info_dentry = dentries[0]; in create_client()
3298 if (!clp->cl_nfsd_dentry) { in create_client()
3302 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); in create_client()
3303 if (!clp->cl_ra) { in create_client()
3307 clp->cl_ra_time = 0; in create_client()
3308 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, in create_client()
3316 struct rb_node **new = &(root->rb_node), *parent = NULL; in add_clp_to_name_tree()
3323 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) in add_clp_to_name_tree()
3324 new = &((*new)->rb_left); in add_clp_to_name_tree()
3326 new = &((*new)->rb_right); in add_clp_to_name_tree()
3329 rb_link_node(&new_clp->cl_namenode, parent, new); in add_clp_to_name_tree()
3330 rb_insert_color(&new_clp->cl_namenode, root); in add_clp_to_name_tree()
3337 struct rb_node *node = root->rb_node; in find_clp_in_name_tree()
3342 cmp = compare_blob(&clp->cl_name, name); in find_clp_in_name_tree()
3344 node = node->rb_left; in find_clp_in_name_tree()
3346 node = node->rb_right; in find_clp_in_name_tree()
3357 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in add_to_unconfirmed()
3359 lockdep_assert_held(&nn->client_lock); in add_to_unconfirmed()
3361 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); in add_to_unconfirmed()
3362 add_clp_to_name_tree(clp, &nn->unconf_name_tree); in add_to_unconfirmed()
3363 idhashval = clientid_hashval(clp->cl_clientid.cl_id); in add_to_unconfirmed()
3364 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); in add_to_unconfirmed()
3371 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); in move_to_confirmed()
3372 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); in move_to_confirmed()
3374 lockdep_assert_held(&nn->client_lock); in move_to_confirmed()
3376 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); in move_to_confirmed()
3377 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); in move_to_confirmed()
3378 add_clp_to_name_tree(clp, &nn->conf_name_tree); in move_to_confirmed()
3379 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); in move_to_confirmed()
3380 trace_nfsd_clid_confirmed(&clp->cl_clientid); in move_to_confirmed()
3388 unsigned int idhashval = clientid_hashval(clid->cl_id); in find_client_in_id_table()
3391 if (same_clid(&clp->cl_clientid, clid)) { in find_client_in_id_table()
3392 if ((bool)clp->cl_minorversion != sessions) in find_client_in_id_table()
3404 struct list_head *tbl = nn->conf_id_hashtbl; in find_confirmed_client()
3406 lockdep_assert_held(&nn->client_lock); in find_confirmed_client()
3413 struct list_head *tbl = nn->unconf_id_hashtbl; in find_unconfirmed_client()
3415 lockdep_assert_held(&nn->client_lock); in find_unconfirmed_client()
3421 return clp->cl_exchange_flags != 0; in clp_used_exchangeid()
3427 lockdep_assert_held(&nn->client_lock); in find_confirmed_client_by_name()
3428 return find_clp_in_name_tree(name, &nn->conf_name_tree); in find_confirmed_client_by_name()
3434 lockdep_assert_held(&nn->client_lock); in find_unconfirmed_client_by_name()
3435 return find_clp_in_name_tree(name, &nn->unconf_name_tree); in find_unconfirmed_client_by_name()
3441 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; in gen_callback()
3447 if (se->se_callback_netid_len == 3 && in gen_callback()
3448 !memcmp(se->se_callback_netid_val, "tcp", 3)) in gen_callback()
3450 else if (se->se_callback_netid_len == 4 && in gen_callback()
3451 !memcmp(se->se_callback_netid_val, "tcp6", 4)) in gen_callback()
3456 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, in gen_callback()
3457 se->se_callback_addr_len, in gen_callback()
3458 (struct sockaddr *)&conn->cb_addr, in gen_callback()
3459 sizeof(conn->cb_addr)); in gen_callback()
3461 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) in gen_callback()
3464 if (conn->cb_addr.ss_family == AF_INET6) in gen_callback()
3465 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; in gen_callback()
3467 conn->cb_prog = se->se_callback_prog; in gen_callback()
3468 conn->cb_ident = se->se_callback_ident; in gen_callback()
3469 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); in gen_callback()
3473 conn->cb_addr.ss_family = AF_UNSPEC; in gen_callback()
3474 conn->cb_addrlen = 0; in gen_callback()
3485 struct xdr_buf *buf = resp->xdr->buf; in nfsd4_store_cache_entry()
3486 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_store_cache_entry()
3489 dprintk("--> %s slot %p\n", __func__, slot); in nfsd4_store_cache_entry()
3491 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; in nfsd4_store_cache_entry()
3492 slot->sl_opcnt = resp->opcnt; in nfsd4_store_cache_entry()
3493 slot->sl_status = resp->cstate.status; in nfsd4_store_cache_entry()
3494 free_svc_cred(&slot->sl_cred); in nfsd4_store_cache_entry()
3495 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); in nfsd4_store_cache_entry()
3498 slot->sl_flags &= ~NFSD4_SLOT_CACHED; in nfsd4_store_cache_entry()
3501 slot->sl_flags |= NFSD4_SLOT_CACHED; in nfsd4_store_cache_entry()
3503 base = resp->cstate.data_offset; in nfsd4_store_cache_entry()
3504 slot->sl_datalen = buf->len - base; in nfsd4_store_cache_entry()
3505 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) in nfsd4_store_cache_entry()
3514 * operation which sets resp->p and increments resp->opcnt for
3523 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_enc_sequence_replay()
3526 op = &args->ops[resp->opcnt - 1]; in nfsd4_enc_sequence_replay()
3529 if (slot->sl_flags & NFSD4_SLOT_CACHED) in nfsd4_enc_sequence_replay()
3530 return op->status; in nfsd4_enc_sequence_replay()
3531 if (args->opcnt == 1) { in nfsd4_enc_sequence_replay()
3533 * The original operation wasn't a solo sequence--we in nfsd4_enc_sequence_replay()
3534 * always cache those--so this retry must not match the in nfsd4_enc_sequence_replay()
3537 op->status = nfserr_seq_false_retry; in nfsd4_enc_sequence_replay()
3539 op = &args->ops[resp->opcnt++]; in nfsd4_enc_sequence_replay()
3540 op->status = nfserr_retry_uncached_rep; in nfsd4_enc_sequence_replay()
3543 return op->status; in nfsd4_enc_sequence_replay()
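/*
 * Replay handling in brief: if the slot's reply was cached
 * (NFSD4_SLOT_CACHED), nfsd4_replay_cache_entry() below re-encodes the
 * saved XDR bytes verbatim.  Otherwise only the SEQUENCE result is
 * returned, with nfserr_retry_uncached_rep for the following op, except
 * when the retry is a solo SEQUENCE: solo SEQUENCE replies are always
 * cached, so a cache miss means the retry cannot match the original and
 * it is flagged as a false retry.
 */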
3554 struct nfsd4_slot *slot = resp->cstate.slot; in nfsd4_replay_cache_entry()
3555 struct xdr_stream *xdr = resp->xdr; in nfsd4_replay_cache_entry()
3559 dprintk("--> %s slot %p\n", __func__, slot); in nfsd4_replay_cache_entry()
3561 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); in nfsd4_replay_cache_entry()
3565 p = xdr_reserve_space(xdr, slot->sl_datalen); in nfsd4_replay_cache_entry()
3570 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); in nfsd4_replay_cache_entry()
3573 resp->opcnt = slot->sl_opcnt; in nfsd4_replay_cache_entry()
3574 return slot->sl_status; in nfsd4_replay_cache_entry()
3584 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; in nfsd4_set_ex_flags()
3586 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; in nfsd4_set_ex_flags()
3590 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; in nfsd4_set_ex_flags()
3593 clid->flags = new->cl_exchange_flags; in nfsd4_set_ex_flags()
3600 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { in client_has_openowners()
3601 if (!list_empty(&oo->oo_owner.so_stateids)) in client_has_openowners()
3611 || !list_empty(&clp->cl_lo_states) in client_has_state()
3613 || !list_empty(&clp->cl_delegations) in client_has_state()
3614 || !list_empty(&clp->cl_sessions) in client_has_state()
3621 if (!exid->nii_domain.data) in copy_impl_id()
3623 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); in copy_impl_id()
3624 if (!clp->cl_nii_domain.data) in copy_impl_id()
3626 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); in copy_impl_id()
3627 if (!clp->cl_nii_name.data) in copy_impl_id()
3629 clp->cl_nii_time = exid->nii_time; in copy_impl_id()
3637 struct nfsd4_exchange_id *exid = &u->exchange_id; in nfsd4_exchange_id()
3642 nfs4_verifier verf = exid->verifier; in nfsd4_exchange_id()
3644 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; in nfsd4_exchange_id()
3650 __func__, rqstp, exid, exid->clname.len, exid->clname.data, in nfsd4_exchange_id()
3651 addr_str, exid->flags, exid->spa_how); in nfsd4_exchange_id()
3653 exid->server_impl_name = kasprintf(GFP_KERNEL, "%s %s %s %s", in nfsd4_exchange_id()
3654 utsname()->sysname, utsname()->release, in nfsd4_exchange_id()
3655 utsname()->version, utsname()->machine); in nfsd4_exchange_id()
3656 if (!exid->server_impl_name) in nfsd4_exchange_id()
3659 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) in nfsd4_exchange_id()
3662 new = create_client(exid->clname, rqstp, &verf); in nfsd4_exchange_id()
3669 switch (exid->spa_how) { in nfsd4_exchange_id()
3671 exid->spo_must_enforce[0] = 0; in nfsd4_exchange_id()
3672 exid->spo_must_enforce[1] = ( in nfsd4_exchange_id()
3673 1 << (OP_BIND_CONN_TO_SESSION - 32) | in nfsd4_exchange_id()
3674 1 << (OP_EXCHANGE_ID - 32) | in nfsd4_exchange_id()
3675 1 << (OP_CREATE_SESSION - 32) | in nfsd4_exchange_id()
3676 1 << (OP_DESTROY_SESSION - 32) | in nfsd4_exchange_id()
3677 1 << (OP_DESTROY_CLIENTID - 32)); in nfsd4_exchange_id()
3679 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | in nfsd4_exchange_id()
3684 exid->spo_must_allow[1] &= ( in nfsd4_exchange_id()
3685 1 << (OP_TEST_STATEID - 32) | in nfsd4_exchange_id()
3686 1 << (OP_FREE_STATEID - 32)); in nfsd4_exchange_id()
3696 if (!new->cl_cred.cr_principal && in nfsd4_exchange_id()
3697 !new->cl_cred.cr_raw_principal) { in nfsd4_exchange_id()
3701 new->cl_mach_cred = true; in nfsd4_exchange_id()
3714 spin_lock(&nn->client_lock); in nfsd4_exchange_id()
3715 conf = find_confirmed_client_by_name(&exid->clname, nn); in nfsd4_exchange_id()
3717 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); in nfsd4_exchange_id()
3718 bool verfs_match = same_verf(&verf, &conf->cl_verifier); in nfsd4_exchange_id()
3738 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; in nfsd4_exchange_id()
3751 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; in nfsd4_exchange_id()
3766 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); in nfsd4_exchange_id()
3778 trace_nfsd_clid_replaced(&conf->cl_clientid); in nfsd4_exchange_id()
3780 new->cl_minorversion = cstate->minorversion; in nfsd4_exchange_id()
3781 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; in nfsd4_exchange_id()
3782 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; in nfsd4_exchange_id()
3785 new->cl_cs_slot.sl_status = nfserr_seq_misordered; in nfsd4_exchange_id()
3790 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; in nfsd4_exchange_id()
3791 exid->clientid.cl_id = conf->cl_clientid.cl_id; in nfsd4_exchange_id()
3793 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; in nfsd4_exchange_id()
3796 exid->nii_domain.len = sizeof("kernel.org") - 1; in nfsd4_exchange_id()
3797 exid->nii_domain.data = "kernel.org"; in nfsd4_exchange_id()
3804 exid->nii_name.len = strlen(exid->server_impl_name); in nfsd4_exchange_id()
3805 if (exid->nii_name.len > NFS4_OPAQUE_LIMIT) in nfsd4_exchange_id()
3806 exid->nii_name.len = NFS4_OPAQUE_LIMIT; in nfsd4_exchange_id()
3807 exid->nii_name.data = exid->server_impl_name; in nfsd4_exchange_id()
3809 /* just send zeros - the date is in nii_name */ in nfsd4_exchange_id()
3810 exid->nii_time.tv_sec = 0; in nfsd4_exchange_id()
3811 exid->nii_time.tv_nsec = 0; in nfsd4_exchange_id()
3814 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); in nfsd4_exchange_id()
3818 spin_unlock(&nn->client_lock); in nfsd4_exchange_id()
3823 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); in nfsd4_exchange_id()
3832 struct nfsd4_exchange_id *exid = &u->exchange_id; in nfsd4_exchange_id_release()
3834 kfree(exid->server_impl_name); in nfsd4_exchange_id_release()
3846 /* Note unsigned 32-bit arithmetic handles wraparound: */ in check_slot_seqid()
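/*
 * (The comparison that follows is roughly "seqid == slot_seqid + 1" on
 * u32 values; when slot_seqid sits at U32_MAX the addition wraps modulo
 * 2^32 instead of overflowing, so the equality test stays well defined.)
 */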
3865 slot->sl_status = nfserr; in nfsd4_cache_create_session()
3866 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); in nfsd4_cache_create_session()
3873 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); in nfsd4_replay_create_session()
3874 return slot->sl_status; in nfsd4_replay_create_session()
3896 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; in check_forechannel_attrs()
3898 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) in check_forechannel_attrs()
3900 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) in check_forechannel_attrs()
3902 ca->headerpadsz = 0; in check_forechannel_attrs()
3903 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); in check_forechannel_attrs()
3904 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); in check_forechannel_attrs()
3905 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); in check_forechannel_attrs()
3906 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, in check_forechannel_attrs()
3908 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); in check_forechannel_attrs()
3914 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3931 ca->headerpadsz = 0; in check_backchannel_attrs()
3933 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) in check_backchannel_attrs()
3935 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) in check_backchannel_attrs()
3937 ca->maxresp_cached = 0; in check_backchannel_attrs()
3938 if (ca->maxops < 2) in check_backchannel_attrs()
3946 switch (cbs->flavor) { in nfsd4_check_cb_sec()
3966 struct nfsd4_create_session *cr_ses = &u->create_session; in nfsd4_create_session()
3976 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) in nfsd4_create_session()
3978 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); in nfsd4_create_session()
3981 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); in nfsd4_create_session()
3984 status = check_backchannel_attrs(&cr_ses->back_channel); in nfsd4_create_session()
3988 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); in nfsd4_create_session()
3995 spin_lock(&nn->client_lock); in nfsd4_create_session()
3997 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */ in nfsd4_create_session()
3998 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); in nfsd4_create_session()
3999 conf = find_confirmed_client(&cr_ses->clientid, true, nn); in nfsd4_create_session()
4007 cs_slot = &conf->cl_cs_slot; in nfsd4_create_session()
4010 cs_slot = &unconf->cl_cs_slot; in nfsd4_create_session()
4013 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); in nfsd4_create_session()
4016 cs_slot->sl_seqid++; in nfsd4_create_session()
4017 cr_ses->seqid = cs_slot->sl_seqid; in nfsd4_create_session()
4036 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || in nfsd4_create_session()
4037 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { in nfsd4_create_session()
4044 old = find_confirmed_client_by_name(&unconf->cl_name, nn); in nfsd4_create_session()
4049 trace_nfsd_clid_replaced(&old->cl_clientid); in nfsd4_create_session()
4058 cr_ses->flags &= ~SESSION4_PERSIST; in nfsd4_create_session()
4060 cr_ses->flags &= ~SESSION4_RDMA; in nfsd4_create_session()
4062 cr_ses->back_channel.maxreqs = new->se_cb_highest_slot + 1; in nfsd4_create_session()
4067 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, in nfsd4_create_session()
4072 spin_unlock(&nn->client_lock); in nfsd4_create_session()
4074 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); in nfsd4_create_session()
4088 cs_slot->sl_seqid--; in nfsd4_create_session()
4089 cr_ses->seqid = cs_slot->sl_seqid; in nfsd4_create_session()
4095 spin_unlock(&nn->client_lock); in nfsd4_create_session()
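/*
 * nfsd4_create_session() above follows RFC 8881 Section 18.36.4: after
 * the client record look-up (Phase 1), check_slot_seqid() validates the
 * CREATE_SESSION slot seqid on cl_cs_slot (Phase 2), the confirmed and
 * unconfirmed client records are reconciled (Phase 3), and the session
 * is installed and the reply cached (Phase 4).  On failure the slot
 * seqid bumped earlier is rolled back so the client can retry.
 */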
4121 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; in nfsd4_backchannel_ctl()
4122 struct nfsd4_session *session = cstate->session; in nfsd4_backchannel_ctl()
4126 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); in nfsd4_backchannel_ctl()
4129 spin_lock(&nn->client_lock); in nfsd4_backchannel_ctl()
4130 session->se_cb_prog = bc->bc_cb_program; in nfsd4_backchannel_ctl()
4131 session->se_cb_sec = bc->bc_cb_sec; in nfsd4_backchannel_ctl()
4132 spin_unlock(&nn->client_lock); in nfsd4_backchannel_ctl()
4134 nfsd4_probe_callback(session->se_client); in nfsd4_backchannel_ctl()
4143 list_for_each_entry(c, &s->se_conns, cn_persession) { in __nfsd4_find_conn()
4144 if (c->cn_xprt == xpt) { in __nfsd4_find_conn()
4154 struct nfs4_client *clp = session->se_client; in nfsd4_match_existing_connection()
4155 struct svc_xprt *xpt = rqst->rq_xprt; in nfsd4_match_existing_connection()
4160 spin_lock(&clp->cl_lock); in nfsd4_match_existing_connection()
4164 else if (req == c->cn_flags) in nfsd4_match_existing_connection()
4167 c->cn_flags != NFS4_CDFC4_BACK) in nfsd4_match_existing_connection()
4170 c->cn_flags != NFS4_CDFC4_FORE) in nfsd4_match_existing_connection()
4174 spin_unlock(&clp->cl_lock); in nfsd4_match_existing_connection()
4184 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; in nfsd4_bind_conn_to_session()
4193 spin_lock(&nn->client_lock); in nfsd4_bind_conn_to_session()
4194 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); in nfsd4_bind_conn_to_session()
4195 spin_unlock(&nn->client_lock); in nfsd4_bind_conn_to_session()
4199 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) in nfsd4_bind_conn_to_session()
4202 bcts->dir, &conn); in nfsd4_bind_conn_to_session()
4204 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || in nfsd4_bind_conn_to_session()
4205 bcts->dir == NFS4_CDFC4_BACK) in nfsd4_bind_conn_to_session()
4206 conn->cn_flags |= NFS4_CDFC4_BACK; in nfsd4_bind_conn_to_session()
4207 nfsd4_probe_callback(session->se_client); in nfsd4_bind_conn_to_session()
4212 status = nfsd4_map_bcts_dir(&bcts->dir); in nfsd4_bind_conn_to_session()
4215 conn = alloc_conn(rqstp, bcts->dir); in nfsd4_bind_conn_to_session()
4229 if (!cstate->session) in nfsd4_compound_in_session()
4231 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); in nfsd4_compound_in_session()
4238 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; in nfsd4_destroy_session()
4252 spin_lock(&nn->client_lock); in nfsd4_destroy_session()
4257 if (!nfsd4_mach_creds_match(ses->se_client, r)) in nfsd4_destroy_session()
4263 spin_unlock(&nn->client_lock); in nfsd4_destroy_session()
4265 nfsd4_probe_callback_sync(ses->se_client); in nfsd4_destroy_session()
4267 spin_lock(&nn->client_lock); in nfsd4_destroy_session()
4272 spin_unlock(&nn->client_lock); in nfsd4_destroy_session()
4279 struct nfs4_client *clp = ses->se_client; in nfsd4_sequence_check_conn()
4284 spin_lock(&clp->cl_lock); in nfsd4_sequence_check_conn()
4285 c = __nfsd4_find_conn(new->cn_xprt, ses); in nfsd4_sequence_check_conn()
4289 if (clp->cl_mach_cred) in nfsd4_sequence_check_conn()
4292 spin_unlock(&clp->cl_lock); in nfsd4_sequence_check_conn()
4296 nfsd4_conn_lost(&new->cn_xpt_user); in nfsd4_sequence_check_conn()
4299 spin_unlock(&clp->cl_lock); in nfsd4_sequence_check_conn()
4306 struct nfsd4_compoundargs *args = rqstp->rq_argp; in nfsd4_session_too_many_ops()
4308 return args->opcnt > session->se_fchannel.maxops; in nfsd4_session_too_many_ops()
4314 struct xdr_buf *xb = &rqstp->rq_arg; in nfsd4_request_too_big()
4316 return xb->len > session->se_fchannel.maxreq_sz; in nfsd4_request_too_big()
4322 struct nfsd4_compoundargs *argp = rqstp->rq_argp; in replay_matches_cache()
4324 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != in replay_matches_cache()
4325 (bool)seq->cachethis) in replay_matches_cache()
4331 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) in replay_matches_cache()
4338 if (slot->sl_opcnt > argp->opcnt) in replay_matches_cache()
4341 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) in replay_matches_cache()
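/*
 * A retry is only answered from the slot's reply cache when it matches
 * what was cached: the same cachethis setting, an op count consistent
 * with the cached reply, and the same credential.  Anything else is
 * treated as a false retry rather than a replay.
 */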
4356 struct nfsd4_sequence *seq = &u->sequence; in nfsd4_sequence()
4357 struct nfsd4_compoundres *resp = rqstp->rq_resp; in nfsd4_sequence()
4358 struct xdr_stream *xdr = resp->xdr; in nfsd4_sequence()
4368 if (resp->opcnt != 1) in nfsd4_sequence()
4379 spin_lock(&nn->client_lock); in nfsd4_sequence()
4380 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); in nfsd4_sequence()
4383 clp = session->se_client; in nfsd4_sequence()
4394 if (seq->slotid >= session->se_fchannel.maxreqs) in nfsd4_sequence()
4397 slot = xa_load(&session->se_slots, seq->slotid); in nfsd4_sequence()
4398 dprintk("%s: slotid %d\n", __func__, seq->slotid); in nfsd4_sequence()
4401 status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags); in nfsd4_sequence()
4404 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) in nfsd4_sequence()
4409 cstate->slot = slot; in nfsd4_sequence()
4410 cstate->session = session; in nfsd4_sequence()
4411 cstate->clp = clp; in nfsd4_sequence()
4412 /* Return the cached reply status and set cstate->status in nfsd4_sequence()
4415 cstate->status = nfserr_replay_cache; in nfsd4_sequence()
4426 if (session->se_target_maxslots < session->se_fchannel.maxreqs && in nfsd4_sequence()
4427 slot->sl_generation == session->se_slot_gen && in nfsd4_sequence()
4428 seq->maxslots <= session->se_target_maxslots) in nfsd4_sequence()
4430 free_session_slots(session, session->se_target_maxslots); in nfsd4_sequence()
4432 buflen = (seq->cachethis) ? in nfsd4_sequence()
4433 session->se_fchannel.maxresp_cached : in nfsd4_sequence()
4434 session->se_fchannel.maxresp_sz; in nfsd4_sequence()
4435 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : in nfsd4_sequence()
4437 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) in nfsd4_sequence()
4443 slot->sl_seqid = seq->seqid; in nfsd4_sequence()
4444 slot->sl_flags &= ~NFSD4_SLOT_REUSED; in nfsd4_sequence()
4445 slot->sl_flags |= NFSD4_SLOT_INUSE; in nfsd4_sequence()
4446 slot->sl_generation = session->se_slot_gen; in nfsd4_sequence()
4447 if (seq->cachethis) in nfsd4_sequence()
4448 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; in nfsd4_sequence()
4450 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; in nfsd4_sequence()
4452 cstate->slot = slot; in nfsd4_sequence()
4453 cstate->session = session; in nfsd4_sequence()
4454 cstate->clp = clp; in nfsd4_sequence()
4459 * fairly quick growth without grossly over-shooting what in nfsd4_sequence()
4462 if (seq->slotid == session->se_fchannel.maxreqs - 1 && in nfsd4_sequence()
4463 session->se_target_maxslots >= session->se_fchannel.maxreqs && in nfsd4_sequence()
4464 session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) { in nfsd4_sequence()
4465 int s = session->se_fchannel.maxreqs; in nfsd4_sequence()
4475 slot = nfsd4_alloc_slot(&session->se_fchannel, s, in nfsd4_sequence()
4477 prev_slot = xa_load(&session->se_slots, s); in nfsd4_sequence()
4479 slot->sl_seqid = xa_to_value(prev_slot); in nfsd4_sequence()
4480 slot->sl_flags |= NFSD4_SLOT_REUSED; in nfsd4_sequence()
4483 !xa_is_err(xa_store(&session->se_slots, s, slot, in nfsd4_sequence()
4486 session->se_fchannel.maxreqs = s; in nfsd4_sequence()
4487 atomic_add(s - session->se_target_maxslots, in nfsd4_sequence()
4489 session->se_target_maxslots = s; in nfsd4_sequence()
4494 } while (slot && --cnt > 0); in nfsd4_sequence()
4498 seq->maxslots = max(session->se_target_maxslots, seq->maxslots); in nfsd4_sequence()
4499 seq->target_maxslots = session->se_target_maxslots; in nfsd4_sequence()
4501 switch (clp->cl_cb_state) { in nfsd4_sequence()
4503 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; in nfsd4_sequence()
4506 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; in nfsd4_sequence()
4509 seq->status_flags = 0; in nfsd4_sequence()
4511 if (!list_empty(&clp->cl_revoked)) in nfsd4_sequence()
4512 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; in nfsd4_sequence()
4513 if (atomic_read(&clp->cl_admin_revoked)) in nfsd4_sequence()
4514 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED; in nfsd4_sequence()
4519 spin_unlock(&nn->client_lock); in nfsd4_sequence()
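/*
 * Slot-table sizing, as seen above: when the client uses the highest
 * slot and the table is below NFSD_MAX_SLOTS_PER_SESSION, the server
 * speculatively grows se_fchannel.maxreqs, reusing any seqid remembered
 * for a previously freed slot (NFSD4_SLOT_REUSED).  Shrinking happens
 * lazily via free_session_slots() once the client's advertised maxslots
 * shows it no longer needs slots above se_target_maxslots; the chosen
 * target is reported back in the reply via seq->target_maxslots.
 */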
4529 struct nfsd4_compound_state *cs = &resp->cstate; in nfsd4_sequence_done()
4532 if (cs->status != nfserr_replay_cache) { in nfsd4_sequence_done()
4534 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; in nfsd4_sequence_done()
4537 nfsd4_put_session(cs->session); in nfsd4_sequence_done()
4538 } else if (cs->clp) in nfsd4_sequence_done()
4539 put_client_renew(cs->clp); in nfsd4_sequence_done()
4547 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; in nfsd4_destroy_clientid()
4553 spin_lock(&nn->client_lock); in nfsd4_destroy_clientid()
4554 unconf = find_unconfirmed_client(&dc->clientid, true, nn); in nfsd4_destroy_clientid()
4555 conf = find_confirmed_client(&dc->clientid, true, nn); in nfsd4_destroy_clientid()
4578 trace_nfsd_clid_destroyed(&clp->cl_clientid); in nfsd4_destroy_clientid()
4581 spin_unlock(&nn->client_lock); in nfsd4_destroy_clientid()
4591 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; in nfsd4_reclaim_complete()
4592 struct nfs4_client *clp = cstate->clp; in nfsd4_reclaim_complete()
4595 if (rc->rca_one_fs) { in nfsd4_reclaim_complete()
4596 if (!cstate->current_fh.fh_dentry) in nfsd4_reclaim_complete()
4606 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) in nfsd4_reclaim_complete()
4621 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); in nfsd4_reclaim_complete()
4632 struct nfsd4_setclientid *setclid = &u->setclientid; in nfsd4_setclientid()
4633 struct xdr_netobj clname = setclid->se_name; in nfsd4_setclientid()
4634 nfs4_verifier clverifier = setclid->se_verf; in nfsd4_setclientid()
4643 spin_lock(&nn->client_lock); in nfsd4_setclientid()
4649 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid()
4658 if (same_verf(&conf->cl_verifier, &clverifier)) { in nfsd4_setclientid()
4666 new->cl_minorversion = 0; in nfsd4_setclientid()
4669 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; in nfsd4_setclientid()
4670 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; in nfsd4_setclientid()
4671 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); in nfsd4_setclientid()
4675 spin_unlock(&nn->client_lock); in nfsd4_setclientid()
4679 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); in nfsd4_setclientid()
4691 &u->setclientid_confirm; in nfsd4_setclientid_confirm()
4694 nfs4_verifier confirm = setclientid_confirm->sc_confirm; in nfsd4_setclientid_confirm()
4695 clientid_t * clid = &setclientid_confirm->sc_clientid; in nfsd4_setclientid_confirm()
4702 spin_lock(&nn->client_lock); in nfsd4_setclientid_confirm()
4713 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid_confirm()
4717 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { in nfsd4_setclientid_confirm()
4721 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { in nfsd4_setclientid_confirm()
4722 if (conf && same_verf(&confirm, &conf->cl_confirm)) { in nfsd4_setclientid_confirm()
4733 nfsd4_change_callback(conf, &unconf->cl_cb_conn); in nfsd4_setclientid_confirm()
4740 old = find_confirmed_client_by_name(&unconf->cl_name, nn); in nfsd4_setclientid_confirm()
4744 && !same_creds(&unconf->cl_cred, in nfsd4_setclientid_confirm()
4745 &old->cl_cred)) { in nfsd4_setclientid_confirm()
4754 trace_nfsd_clid_replaced(&old->cl_clientid); in nfsd4_setclientid_confirm()
4764 spin_unlock(&nn->client_lock); in nfsd4_setclientid_confirm()
4766 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); in nfsd4_setclientid_confirm()
4768 spin_lock(&nn->client_lock); in nfsd4_setclientid_confirm()
4771 spin_unlock(&nn->client_lock); in nfsd4_setclientid_confirm()
4786 refcount_set(&fp->fi_ref, 1); in nfsd4_file_init()
4787 spin_lock_init(&fp->fi_lock); in nfsd4_file_init()
4788 INIT_LIST_HEAD(&fp->fi_stateids); in nfsd4_file_init()
4789 INIT_LIST_HEAD(&fp->fi_delegations); in nfsd4_file_init()
4790 INIT_LIST_HEAD(&fp->fi_clnt_odstate); in nfsd4_file_init()
4791 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); in nfsd4_file_init()
4792 fp->fi_deleg_file = NULL; in nfsd4_file_init()
4793 fp->fi_rdeleg_file = NULL; in nfsd4_file_init()
4794 fp->fi_had_conflict = false; in nfsd4_file_init()
4795 fp->fi_share_deny = 0; in nfsd4_file_init()
4796 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); in nfsd4_file_init()
4797 memset(fp->fi_access, 0, sizeof(fp->fi_access)); in nfsd4_file_init()
4798 fp->fi_aliased = false; in nfsd4_file_init()
4799 fp->fi_inode = d_inode(fh->fh_dentry); in nfsd4_file_init()
4801 INIT_LIST_HEAD(&fp->fi_lo_states); in nfsd4_file_init()
4802 atomic_set(&fp->fi_lo_recalls, 0); in nfsd4_file_init()
4857 return -ENOMEM; in nfsd4_init_slabs()
4863 struct nfsd_net *nn = shrink->private_data; in nfsd4_state_shrinker_count()
4866 count = atomic_read(&nn->nfsd_courtesy_clients); in nfsd4_state_shrinker_count()
4870 queue_work(laundry_wq, &nn->nfsd_shrinker_work); in nfsd4_state_shrinker_count()
4886 nn->nfsd4_lease = 90; /* default lease time */ in nfsd4_init_leases_net()
4887 nn->nfsd4_grace = 90; in nfsd4_init_leases_net()
4888 nn->somebody_reclaimed = false; in nfsd4_init_leases_net()
4889 nn->track_reclaim_completes = false; in nfsd4_init_leases_net()
4890 nn->clverifier_counter = get_random_u32(); in nfsd4_init_leases_net()
4891 nn->clientid_base = get_random_u32(); in nfsd4_init_leases_net()
4892 nn->clientid_counter = nn->clientid_base + 1; in nfsd4_init_leases_net()
4893 nn->s2s_cp_cl_id = nn->clientid_counter++; in nfsd4_init_leases_net()
4895 atomic_set(&nn->nfs4_client_count, 0); in nfsd4_init_leases_net()
4899 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); in nfsd4_init_leases_net()
4901 atomic_set(&nn->nfsd_courtesy_clients, 0); in nfsd4_init_leases_net()
4912 rp->rp_status = nfserr_serverfault; in init_nfs4_replay()
4913 rp->rp_buflen = 0; in init_nfs4_replay()
4914 rp->rp_buf = rp->rp_ibuf; in init_nfs4_replay()
4915 rp->rp_locked = RP_UNLOCKED; in init_nfs4_replay()
4922 wait_var_event(&so->so_replay.rp_locked, in nfsd4_cstate_assign_replay()
4923 cmpxchg(&so->so_replay.rp_locked, in nfsd4_cstate_assign_replay()
4925 if (so->so_replay.rp_locked == RP_UNHASHED) in nfsd4_cstate_assign_replay()
4926 return -EAGAIN; in nfsd4_cstate_assign_replay()
4927 cstate->replay_owner = nfs4_get_stateowner(so); in nfsd4_cstate_assign_replay()
4934 struct nfs4_stateowner *so = cstate->replay_owner; in nfsd4_cstate_clear_replay()
4937 cstate->replay_owner = NULL; in nfsd4_cstate_clear_replay()
4938 store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED); in nfsd4_cstate_clear_replay()
4951 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); in alloc_stateowner()
4952 if (!sop->so_owner.data) { in alloc_stateowner()
4957 INIT_LIST_HEAD(&sop->so_stateids); in alloc_stateowner()
4958 sop->so_client = clp; in alloc_stateowner()
4959 init_nfs4_replay(&sop->so_replay); in alloc_stateowner()
4960 atomic_set(&sop->so_count, 1); in alloc_stateowner()
4966 lockdep_assert_held(&clp->cl_lock); in hash_openowner()
4968 list_add(&oo->oo_owner.so_strhash, in hash_openowner()
4969 &clp->cl_ownerstr_hashtbl[strhashval]); in hash_openowner()
4970 list_add(&oo->oo_perclient, &clp->cl_openowners); in hash_openowner()
4994 struct nfs4_openowner *oo = open->op_openowner; in nfsd4_find_existing_open()
4996 lockdep_assert_held(&fp->fi_lock); in nfsd4_find_existing_open()
4998 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { in nfsd4_find_existing_open()
5000 if (local->st_stateowner->so_is_open_owner == 0) in nfsd4_find_existing_open()
5002 if (local->st_stateowner != &oo->oo_owner) in nfsd4_find_existing_open()
5004 if (local->st_stid.sc_type == SC_TYPE_OPEN && in nfsd4_find_existing_open()
5005 !local->st_stid.sc_status) { in nfsd4_find_existing_open()
5007 refcount_inc(&ret->st_stid.sc_count); in nfsd4_find_existing_open()
5015 __releases(&s->sc_client->cl_lock) in nfsd4_drop_revoked_stid()
5017 struct nfs4_client *cl = s->sc_client; in nfsd4_drop_revoked_stid()
5023 switch (s->sc_type) { in nfsd4_drop_revoked_stid()
5028 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5034 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5040 list_del_init(&dp->dl_recall_lru); in nfsd4_drop_revoked_stid()
5041 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5045 spin_unlock(&cl->cl_lock); in nfsd4_drop_revoked_stid()
5053 * that it can forget an admin-revoked stateid. in nfsd40_drop_revoked_stid()
5060 if (cl->cl_minorversion == 0) { in nfsd40_drop_revoked_stid()
5063 spin_lock(&cl->cl_lock); in nfsd40_drop_revoked_stid()
5068 spin_unlock(&cl->cl_lock); in nfsd40_drop_revoked_stid()
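/*
 * NFSv4.0 has no FREE_STATEID operation, so a v4.0 client cannot
 * explicitly discard a stateid the administrator revoked; instead the
 * server unhashes it here the next time the client presents it.
 */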
5077 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) in nfsd4_verify_open_stid()
5079 else if (s->sc_status & SC_STATUS_REVOKED) in nfsd4_verify_open_stid()
5081 else if (s->sc_status & SC_STATUS_CLOSED) in nfsd4_verify_open_stid()
5092 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); in nfsd4_lock_ol_stateid()
5093 ret = nfsd4_verify_open_stid(&stp->st_stid); in nfsd4_lock_ol_stateid()
5095 nfsd40_drop_revoked_stid(stp->st_stid.sc_client, in nfsd4_lock_ol_stateid()
5096 &stp->st_stid.sc_stateid); in nfsd4_lock_ol_stateid()
5099 mutex_unlock(&stp->st_mutex); in nfsd4_lock_ol_stateid()
5108 spin_lock(&fp->fi_lock); in nfsd4_find_and_lock_existing_open()
5110 spin_unlock(&fp->fi_lock); in nfsd4_find_and_lock_existing_open()
5113 nfs4_put_stid(&stp->st_stid); in nfsd4_find_and_lock_existing_open()
5122 struct nfs4_client *clp = cstate->clp; in find_or_alloc_open_stateowner()
5126 spin_lock(&clp->cl_lock); in find_or_alloc_open_stateowner()
5130 spin_unlock(&clp->cl_lock); in find_or_alloc_open_stateowner()
5133 spin_unlock(&clp->cl_lock); in find_or_alloc_open_stateowner()
5135 if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) { in find_or_alloc_open_stateowner()
5142 nfs4_free_stateowner(&new->oo_owner); in find_or_alloc_open_stateowner()
5146 new = alloc_stateowner(openowner_slab, &open->op_owner, clp); in find_or_alloc_open_stateowner()
5149 new->oo_owner.so_ops = &openowner_ops; in find_or_alloc_open_stateowner()
5150 new->oo_owner.so_is_open_owner = 1; in find_or_alloc_open_stateowner()
5151 new->oo_owner.so_seqid = open->op_seqid; in find_or_alloc_open_stateowner()
5152 new->oo_flags = 0; in find_or_alloc_open_stateowner()
5154 new->oo_flags |= NFS4_OO_CONFIRMED; in find_or_alloc_open_stateowner()
5155 new->oo_time = 0; in find_or_alloc_open_stateowner()
5156 new->oo_last_closed_stid = NULL; in find_or_alloc_open_stateowner()
5157 INIT_LIST_HEAD(&new->oo_close_lru); in find_or_alloc_open_stateowner()
5165 struct nfs4_openowner *oo = open->op_openowner; in init_open_stateid()
5169 stp = open->op_stp; in init_open_stateid()
5171 mutex_init(&stp->st_mutex); in init_open_stateid()
5172 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); in init_open_stateid()
5175 spin_lock(&oo->oo_owner.so_client->cl_lock); in init_open_stateid()
5176 spin_lock(&fp->fi_lock); in init_open_stateid()
5179 mutex_unlock(&stp->st_mutex); in init_open_stateid()
5188 open->op_stp = NULL; in init_open_stateid()
5189 refcount_inc(&stp->st_stid.sc_count); in init_open_stateid()
5190 stp->st_stid.sc_type = SC_TYPE_OPEN; in init_open_stateid()
5191 INIT_LIST_HEAD(&stp->st_locks); in init_open_stateid()
5192 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); in init_open_stateid()
5194 stp->st_stid.sc_file = fp; in init_open_stateid()
5195 stp->st_access_bmap = 0; in init_open_stateid()
5196 stp->st_deny_bmap = 0; in init_open_stateid()
5197 stp->st_openstp = NULL; in init_open_stateid()
5198 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); in init_open_stateid()
5199 list_add(&stp->st_perfile, &fp->fi_stateids); in init_open_stateid()
5202 spin_unlock(&fp->fi_lock); in init_open_stateid()
5203 spin_unlock(&oo->oo_owner.so_client->cl_lock); in init_open_stateid()
5207 nfs4_put_stid(&retstp->st_stid); in init_open_stateid()
5211 mutex_unlock(&stp->st_mutex); in init_open_stateid()
5226 struct nfs4_openowner *oo = openowner(s->st_stateowner); in move_to_close_lru()
5227 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, in move_to_close_lru()
5243 store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED); in move_to_close_lru()
5244 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); in move_to_close_lru()
5247 if (s->st_stid.sc_file) { in move_to_close_lru()
5248 put_nfs4_file(s->st_stid.sc_file); in move_to_close_lru()
5249 s->st_stid.sc_file = NULL; in move_to_close_lru()
5252 spin_lock(&nn->client_lock); in move_to_close_lru()
5253 last = oo->oo_last_closed_stid; in move_to_close_lru()
5254 oo->oo_last_closed_stid = s; in move_to_close_lru()
5255 list_move_tail(&oo->oo_close_lru, &nn->close_lru); in move_to_close_lru()
5256 oo->oo_time = ktime_get_boottime_seconds(); in move_to_close_lru()
5257 spin_unlock(&nn->client_lock); in move_to_close_lru()
5259 nfs4_put_stid(&last->st_stid); in move_to_close_lru()
5265 struct inode *inode = d_inode(fhp->fh_dentry); in nfsd4_file_hash_lookup()
5273 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { in nfsd4_file_hash_lookup()
5274 if (refcount_inc_not_zero(&fi->fi_ref)) { in nfsd4_file_hash_lookup()
5289 * inode->i_lock prevents racing insertions from adding an entry
5295 struct inode *inode = d_inode(fhp->fh_dentry); in nfsd4_file_hash_insert()
5303 spin_lock(&inode->i_lock); in nfsd4_file_hash_insert()
5308 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { in nfsd4_file_hash_insert()
5309 if (refcount_inc_not_zero(&fi->fi_ref)) in nfsd4_file_hash_insert()
5312 fi->fi_aliased = alias_found = true; in nfsd4_file_hash_insert()
5318 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, in nfsd4_file_hash_insert()
5323 new->fi_aliased = alias_found; in nfsd4_file_hash_insert()
5327 spin_unlock(&inode->i_lock); in nfsd4_file_hash_insert()
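/*
 * Both the lookup and the insert happen under inode->i_lock, so two
 * concurrent opens of the same file cannot both insert: the later one
 * finds the earlier entry by filehandle match and just takes a
 * reference.  A hit on the same inode through a different filehandle
 * does not match; it only marks both nfs4_files as aliased.
 */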
5334 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, in nfsd4_file_hash_remove()
5353 spin_lock(&fp->fi_lock); in nfs4_share_conflict()
5354 if (fp->fi_share_deny & deny_type) in nfs4_share_conflict()
5356 spin_unlock(&fp->fi_lock); in nfs4_share_conflict()
5365 return ctx && !list_empty_careful(&ctx->flc_lease); in nfsd4_deleg_present()
5369 * nfsd_wait_for_delegreturn - wait for delegations to be returned
5371 * @inode: in-core inode of the file being waited for
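/*
 * The body is not shown here; it simply gives the client a short,
 * bounded window to return conflicting delegations on @inode (i.e. it
 * waits, with a timeout, for nfsd4_deleg_present() to become false)
 * instead of failing the conflicting operation immediately.
 */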
5393 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, in nfsd4_cb_recall_prepare()
5396 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); in nfsd4_cb_recall_prepare()
5400 * already holding inode->i_lock. in nfsd4_cb_recall_prepare()
5406 if (delegation_hashed(dp) && dp->dl_time == 0) { in nfsd4_cb_recall_prepare()
5407 dp->dl_time = ktime_get_boottime_seconds(); in nfsd4_cb_recall_prepare()
5408 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); in nfsd4_cb_recall_prepare()
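/*
 * Queueing onto del_recall_lru happens here, in the callback prepare
 * stage, rather than in nfsd_break_deleg_cb(): the lm_break callback
 * runs with inode->i_lock held, so it only marks the delegation as
 * recalled and kicks the laundromat.  dl_time doubles as an
 * "already queued" flag so a delegation is never queued twice.
 */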
5418 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); in nfsd4_cb_recall_done()
5420 if (dp->dl_stid.sc_status) in nfsd4_cb_recall_done()
5424 switch (task->tk_status) { in nfsd4_cb_recall_done()
5427 case -NFS4ERR_DELAY: in nfsd4_cb_recall_done()
5430 case -EBADHANDLE: in nfsd4_cb_recall_done()
5431 case -NFS4ERR_BAD_STATEID: in nfsd4_cb_recall_done()
5436 if (dp->dl_retries--) { in nfsd4_cb_recall_done()
5450 nfs4_put_stid(&dp->dl_stid); in nfsd4_cb_recall_release()
5464 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &dp->dl_recall.cb_flags)) in nfsd_break_one_deleg()
5474 refcount_inc(&dp->dl_stid.sc_count); in nfsd_break_one_deleg()
5475 queued = nfsd4_run_cb(&dp->dl_recall); in nfsd_break_one_deleg()
5478 refcount_dec(&dp->dl_stid.sc_count); in nfsd_break_one_deleg()
5485 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner; in nfsd_break_deleg_cb()
5486 struct nfs4_file *fp = dp->dl_stid.sc_file; in nfsd_break_deleg_cb()
5487 struct nfs4_client *clp = dp->dl_stid.sc_client; in nfsd_break_deleg_cb()
5490 trace_nfsd_cb_recall(&dp->dl_stid); in nfsd_break_deleg_cb()
5492 dp->dl_recalled = true; in nfsd_break_deleg_cb()
5493 atomic_inc(&clp->cl_delegs_in_recall); in nfsd_break_deleg_cb()
5495 nn = net_generic(clp->net, nfsd_net_id); in nfsd_break_deleg_cb()
5496 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfsd_break_deleg_cb()
5504 fl->fl_break_time = 0; in nfsd_break_deleg_cb()
5506 fp->fi_had_conflict = true; in nfsd_break_deleg_cb()
5512 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
5521 struct nfs4_delegation *dl = fl->c.flc_owner; in nfsd_breaker_owns_lease()
5528 clp = *(rqst->rq_lease_breaker); in nfsd_breaker_owns_lease()
5529 return dl->dl_stid.sc_client == clp; in nfsd_breaker_owns_lease()
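/*
 * In other words: if the lease break was triggered by an nfsd request
 * (rq_lease_breaker is set) issued by the same client that holds the
 * delegation, the conflict is self-inflicted and no recall is needed,
 * so the lease machinery can treat it as already resolved.
 */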
5536 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner; in nfsd_change_deleg_cb()
5537 struct nfs4_client *clp = dp->dl_stid.sc_client; in nfsd_change_deleg_cb()
5540 if (dp->dl_recalled) in nfsd_change_deleg_cb()
5541 atomic_dec(&clp->cl_delegs_in_recall); in nfsd_change_deleg_cb()
5544 return -EAGAIN; in nfsd_change_deleg_cb()
5557 if (seqid == so->so_seqid - 1) in nfsd4_check_seqid()
5559 if (seqid == so->so_seqid) in nfsd4_check_seqid()
5569 spin_lock(&nn->client_lock); in lookup_clientid()
5572 atomic_inc(&found->cl_rpc_users); in lookup_clientid()
5573 spin_unlock(&nn->client_lock); in lookup_clientid()
5581 if (cstate->clp) { in set_client()
5582 if (!same_clid(&cstate->clp->cl_clientid, clid)) in set_client()
5590 * set cstate->clp), so session = false: in set_client()
5592 cstate->clp = lookup_clientid(clid, false, nn); in set_client()
5593 if (!cstate->clp) in set_client()
5602 clientid_t *clientid = &open->op_clientid; in nfsd4_process_open1()
5612 open->op_file = nfsd4_alloc_file(); in nfsd4_process_open1()
5613 if (open->op_file == NULL) in nfsd4_process_open1()
5619 clp = cstate->clp; in nfsd4_process_open1()
5621 strhashval = ownerstr_hashval(&open->op_owner); in nfsd4_process_open1()
5624 open->op_openowner = oo; in nfsd4_process_open1()
5627 if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) { in nfsd4_process_open1()
5628 nfs4_put_stateowner(&oo->oo_owner); in nfsd4_process_open1()
5631 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); in nfsd4_process_open1()
5635 open->op_stp = nfs4_alloc_open_stateid(clp); in nfsd4_process_open1()
5636 if (!open->op_stp) in nfsd4_process_open1()
5640 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { in nfsd4_process_open1()
5641 open->op_odstate = alloc_clnt_odstate(clp); in nfsd4_process_open1()
5642 if (!open->op_odstate) in nfsd4_process_open1()
5652 if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type)) in nfs4_check_delegmode()
5676 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || in nfsd4_is_deleg_cur()
5677 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; in nfsd4_is_deleg_cur()
5688 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); in nfs4_check_deleg()
5691 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) { in nfs4_check_deleg()
5692 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5696 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) { in nfs4_check_deleg()
5697 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5698 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid); in nfs4_check_deleg()
5702 flags = share_access_to_flags(open->op_share_access); in nfs4_check_deleg()
5705 nfs4_put_stid(&deleg->dl_stid); in nfs4_check_deleg()
5714 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; in nfs4_check_deleg()
5740 if (!open->op_truncate) in nfsd4_truncate()
5742 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) in nfsd4_truncate()
5753 int oflag = nfs4_access_to_omode(open->op_share_access); in nfs4_get_vfs_file()
5754 int access = nfs4_access_to_access(open->op_share_access); in nfs4_get_vfs_file()
5757 spin_lock(&fp->fi_lock); in nfs4_get_vfs_file()
5763 status = nfs4_file_check_deny(fp, open->op_share_deny); in nfs4_get_vfs_file()
5766 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5770 stp, open->op_share_deny, false)) in nfs4_get_vfs_file()
5772 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5777 status = nfs4_file_get_access(fp, open->op_share_access); in nfs4_get_vfs_file()
5780 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5784 stp, open->op_share_access, true)) in nfs4_get_vfs_file()
5786 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5791 old_access_bmap = stp->st_access_bmap; in nfs4_get_vfs_file()
5792 set_access(open->op_share_access, stp); in nfs4_get_vfs_file()
5795 old_deny_bmap = stp->st_deny_bmap; in nfs4_get_vfs_file()
5796 set_deny(open->op_share_deny, stp); in nfs4_get_vfs_file()
5797 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); in nfs4_get_vfs_file()
5799 if (!fp->fi_fds[oflag]) { in nfs4_get_vfs_file()
5800 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5803 open->op_filp, &nf); in nfs4_get_vfs_file()
5807 spin_lock(&fp->fi_lock); in nfs4_get_vfs_file()
5808 if (!fp->fi_fds[oflag]) { in nfs4_get_vfs_file()
5809 fp->fi_fds[oflag] = nf; in nfs4_get_vfs_file()
5813 spin_unlock(&fp->fi_lock); in nfs4_get_vfs_file()
5817 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, in nfs4_get_vfs_file()
5828 stp->st_access_bmap = old_access_bmap; in nfs4_get_vfs_file()
5829 nfs4_file_put_access(fp, open->op_share_access); in nfs4_get_vfs_file()
5840 unsigned char old_deny_bmap = stp->st_deny_bmap; in nfs4_upgrade_open()
5842 if (!test_access(open->op_share_access, stp)) in nfs4_upgrade_open()
5846 spin_lock(&fp->fi_lock); in nfs4_upgrade_open()
5847 status = nfs4_file_check_deny(fp, open->op_share_deny); in nfs4_upgrade_open()
5850 set_deny(open->op_share_deny, stp); in nfs4_upgrade_open()
5851 fp->fi_share_deny |= in nfs4_upgrade_open()
5852 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); in nfs4_upgrade_open()
5856 stp, open->op_share_deny, false)) in nfs4_upgrade_open()
5860 spin_unlock(&fp->fi_lock); in nfs4_upgrade_open()
5874 if (clp->cl_cb_state == NFSD4_CB_UP) in nfsd4_cb_channel_good()
5881 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; in nfsd4_cb_channel_good()
5891 fl->fl_lmops = &nfsd_lease_mng_ops; in nfs4_alloc_init_lease()
5892 fl->c.flc_flags = FL_DELEG; in nfs4_alloc_init_lease()
5893 fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK; in nfs4_alloc_init_lease()
5894 fl->c.flc_owner = (fl_owner_t)dp; in nfs4_alloc_init_lease()
5895 fl->c.flc_pid = current->tgid; in nfs4_alloc_init_lease()
5896 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; in nfs4_alloc_init_lease()
5904 struct file *f = fp->fi_deleg_file->nf_file; in nfsd4_check_conflicting_opens()
5908 writes = atomic_read(&ino->i_writecount); in nfsd4_check_conflicting_opens()
5918 if (fp->fi_aliased) in nfsd4_check_conflicting_opens()
5919 return -EAGAIN; in nfsd4_check_conflicting_opens()
5927 if (fp->fi_fds[O_WRONLY]) in nfsd4_check_conflicting_opens()
5928 writes--; in nfsd4_check_conflicting_opens()
5929 if (fp->fi_fds[O_RDWR]) in nfsd4_check_conflicting_opens()
5930 writes--; in nfsd4_check_conflicting_opens()
5932 return -EAGAIN; /* There may be non-NFSv4 writers */ in nfsd4_check_conflicting_opens()
5934 * It's possible there are non-NFSv4 write opens in progress, in nfsd4_check_conflicting_opens()
5940 spin_lock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
5941 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { in nfsd4_check_conflicting_opens()
5942 if (st->st_openstp == NULL /* it's an open */ && in nfsd4_check_conflicting_opens()
5944 st->st_stid.sc_client != clp) { in nfsd4_check_conflicting_opens()
5945 spin_unlock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
5946 return -EAGAIN; in nfsd4_check_conflicting_opens()
5949 spin_unlock(&fp->fi_lock); in nfsd4_check_conflicting_opens()
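/*
 * Heuristic used above: start from the inode's i_writecount, subtract
 * the write opens nfsd itself holds (fi_fds[O_WRONLY]/[O_RDWR]), and
 * refuse the delegation (-EAGAIN) if any writers remain, if the file is
 * aliased across filehandles, or if another client holds its own open
 * stateid for this file.
 */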
5972 err = nfsd_lookup_dentry(open->op_rqstp, parent, in nfsd4_verify_deleg_dentry()
5973 open->op_fname, open->op_fnamelen, in nfsd4_verify_deleg_dentry()
5977 return -EAGAIN; in nfsd4_verify_deleg_dentry()
5981 if (child != file_dentry(fp->fi_deleg_file->nf_file)) in nfsd4_verify_deleg_dentry()
5982 return -EAGAIN; in nfsd4_verify_deleg_dentry()
5996 struct inode *inode = file_inode(nf->nf_file); in nfsd4_verify_setuid_write()
5998 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) && in nfsd4_verify_setuid_write()
5999 (inode->i_mode & (S_ISUID|S_ISGID))) in nfsd4_verify_setuid_write()
6000 return -EAGAIN; in nfsd4_verify_setuid_write()
6007 return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; in nfsd4_want_deleg_timestamps()
6021 struct nfs4_client *clp = stp->st_stid.sc_client; in nfs4_set_delegation()
6022 struct nfs4_file *fp = stp->st_stid.sc_file; in nfs4_set_delegation()
6023 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; in nfs4_set_delegation()
6035 if (fp->fi_had_conflict) in nfs4_set_delegation()
6036 return ERR_PTR(-EAGAIN); in nfs4_set_delegation()
6055 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { in nfs4_set_delegation()
6064 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) { in nfs4_set_delegation()
6070 return ERR_PTR(-EAGAIN); in nfs4_set_delegation()
6076 if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) { in nfs4_set_delegation()
6078 return ERR_PTR(-EOPNOTSUPP); in nfs4_set_delegation()
6082 spin_lock(&fp->fi_lock); in nfs4_set_delegation()
6084 status = -EAGAIN; in nfs4_set_delegation()
6086 status = -EAGAIN; in nfs4_set_delegation()
6087 else if (!fp->fi_deleg_file) { in nfs4_set_delegation()
6088 fp->fi_deleg_file = nf; in nfs4_set_delegation()
6091 fp->fi_delegees = 1; in nfs4_set_delegation()
6094 fp->fi_delegees++; in nfs4_set_delegation()
6095 spin_unlock(&fp->fi_lock); in nfs4_set_delegation()
6102 status = -ENOMEM; in nfs4_set_delegation()
6111 status = kernel_setlease(fp->fi_deleg_file->nf_file, in nfs4_set_delegation()
6112 fl->c.flc_type, &fl, NULL); in nfs4_set_delegation()
6132 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); in nfs4_set_delegation()
6136 status = -EAGAIN; in nfs4_set_delegation()
6137 if (fp->fi_had_conflict) in nfs4_set_delegation()
6141 spin_lock(&clp->cl_lock); in nfs4_set_delegation()
6142 spin_lock(&fp->fi_lock); in nfs4_set_delegation()
6144 spin_unlock(&fp->fi_lock); in nfs4_set_delegation()
6145 spin_unlock(&clp->cl_lock); in nfs4_set_delegation()
6153 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); in nfs4_set_delegation()
6155 put_clnt_odstate(dp->dl_clnt_odstate); in nfs4_set_delegation()
6156 nfs4_put_stid(&dp->dl_stid); in nfs4_set_delegation()
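/*
 * The pattern above is check / take lease / re-check: conflicting opens
 * and fi_had_conflict are tested before kernel_setlease() installs the
 * FL_DELEG lease, and tested again afterwards under cl_lock/fi_lock
 * before the delegation is hashed, since a conflict can race in while
 * the lease is being set up.  On a late failure the lease is torn down
 * with F_UNLCK and the stid reference is dropped.
 */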
6164 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_open_deleg_none_ext()
6165 if (status == -EAGAIN) in nfsd4_open_deleg_none_ext()
6166 open->op_why_no_deleg = WND4_CONTENTION; in nfsd4_open_deleg_none_ext()
6168 open->op_why_no_deleg = WND4_RESOURCE; in nfsd4_open_deleg_none_ext()
6169 switch (open->op_deleg_want) { in nfsd4_open_deleg_none_ext()
6175 open->op_why_no_deleg = WND4_CANCELLED; in nfsd4_open_deleg_none_ext()
6187 struct nfsd_file *nf = find_writeable_file(dp->dl_stid.sc_file); in nfs4_delegation_stat()
6194 path.mnt = currentfh->fh_export->ex_path.mnt; in nfs4_delegation_stat()
6195 path.dentry = file_dentry(nf->nf_file); in nfs4_delegation_stat()
6219 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == in nfsd4_add_rdaccess_to_wrdeleg()
6223 fp = stp->st_stid.sc_file; in nfsd4_add_rdaccess_to_wrdeleg()
6224 spin_lock(&fp->fi_lock); in nfsd4_add_rdaccess_to_wrdeleg()
6226 fp = stp->st_stid.sc_file; in nfsd4_add_rdaccess_to_wrdeleg()
6227 fp->fi_fds[O_RDONLY] = nf; in nfsd4_add_rdaccess_to_wrdeleg()
6228 fp->fi_rdeleg_file = nf; in nfsd4_add_rdaccess_to_wrdeleg()
6229 spin_unlock(&fp->fi_lock); in nfsd4_add_rdaccess_to_wrdeleg()
6263 struct nfs4_openowner *oo = openowner(stp->st_stateowner); in nfs4_open_delegation()
6265 struct nfs4_client *clp = stp->st_stid.sc_client; in nfs4_open_delegation()
6272 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); in nfs4_open_delegation()
6273 open->op_recall = false; in nfs4_open_delegation()
6274 switch (open->op_claim_type) { in nfs4_open_delegation()
6277 open->op_recall = true; in nfs4_open_delegation()
6288 if (locks_in_grace(clp->net)) in nfs4_open_delegation()
6290 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) in nfs4_open_delegation()
6292 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE && in nfs4_open_delegation()
6293 !clp->cl_minorversion) in nfs4_open_delegation()
6303 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); in nfs4_open_delegation()
6305 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { in nfs4_open_delegation()
6306 struct file *f = dp->dl_stid.sc_file->fi_deleg_file->nf_file; in nfs4_open_delegation()
6310 nfs4_put_stid(&dp->dl_stid); in nfs4_open_delegation()
6314 open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : in nfs4_open_delegation()
6316 dp->dl_cb_fattr.ncf_cur_fsize = stat.size; in nfs4_open_delegation()
6317 dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat); in nfs4_open_delegation()
6318 dp->dl_atime = stat.atime; in nfs4_open_delegation()
6319 dp->dl_ctime = stat.ctime; in nfs4_open_delegation()
6320 dp->dl_mtime = stat.mtime; in nfs4_open_delegation()
6321 spin_lock(&f->f_lock); in nfs4_open_delegation()
6322 f->f_mode |= FMODE_NOCMTIME; in nfs4_open_delegation()
6323 spin_unlock(&f->f_lock); in nfs4_open_delegation()
6324 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); in nfs4_open_delegation()
6326 open->op_delegate_type = deleg_ts && nfs4_delegation_stat(dp, currentfh, &stat) ? in nfs4_open_delegation()
6328 dp->dl_atime = stat.atime; in nfs4_open_delegation()
6329 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); in nfs4_open_delegation()
6331 nfs4_put_stid(&dp->dl_stid); in nfs4_open_delegation()
6334 open->op_delegate_type = OPEN_DELEGATE_NONE; in nfs4_open_delegation()
6335 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && in nfs4_open_delegation()
6336 open->op_delegate_type != OPEN_DELEGATE_NONE) { in nfs4_open_delegation()
6338 open->op_recall = true; in nfs4_open_delegation()
6342 if (open->op_deleg_want) in nfs4_open_delegation()
6350 if (deleg_is_write(dp->dl_type)) { in nfsd4_deleg_xgrade_none_ext()
6351 if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) { in nfsd4_deleg_xgrade_none_ext()
6352 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_deleg_xgrade_none_ext()
6353 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; in nfsd4_deleg_xgrade_none_ext()
6354 } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) { in nfsd4_deleg_xgrade_none_ext()
6355 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_deleg_xgrade_none_ext()
6356 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; in nfsd4_deleg_xgrade_none_ext()
6368 if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION)) in open_xor_delegation()
6371 if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type)) in open_xor_delegation()
6377 * nfsd4_process_open2 - finish open processing
6382 * If successful, (1) truncate the file if open->op_truncate was
6383 * set, (2) set open->op_stateid, (3) set open->op_delegation.
6391 struct nfsd4_compoundres *resp = rqstp->rq_resp; in nfsd4_process_open2()
6392 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; in nfsd4_process_open2()
6404 fp = nfsd4_file_hash_insert(open->op_file, current_fh); in nfsd4_process_open2()
6407 if (fp != open->op_file) { in nfsd4_process_open2()
6412 (dp->dl_stid.sc_file != fp)) { in nfsd4_process_open2()
6427 open->op_file = NULL; in nfsd4_process_open2()
6440 if (!open->op_stp) in nfsd4_process_open2()
6454 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
6461 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
6465 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, in nfsd4_process_open2()
6466 open->op_odstate); in nfsd4_process_open2()
6467 if (stp->st_clnt_odstate == open->op_odstate) in nfsd4_process_open2()
6468 open->op_odstate = NULL; in nfsd4_process_open2()
6471 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); in nfsd4_process_open2()
6472 mutex_unlock(&stp->st_mutex); in nfsd4_process_open2()
6474 if (nfsd4_has_session(&resp->cstate)) { in nfsd4_process_open2()
6475 if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) { in nfsd4_process_open2()
6476 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; in nfsd4_process_open2()
6477 open->op_why_no_deleg = WND4_NOT_WANTED; in nfsd4_process_open2()
6487 &resp->cstate.current_fh, current_fh); in nfsd4_process_open2()
6495 memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid)); in nfsd4_process_open2()
6496 open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID; in nfsd4_process_open2()
6501 trace_nfsd_open(&stp->st_stid.sc_stateid); in nfsd4_process_open2()
6504 if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp && in nfsd4_process_open2()
6505 open->op_deleg_want) in nfsd4_process_open2()
6510 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) in nfsd4_process_open2()
6511 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; in nfsd4_process_open2()
6515 open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX; in nfsd4_process_open2()
6516 if (nfsd4_has_session(&resp->cstate)) in nfsd4_process_open2()
6517 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; in nfsd4_process_open2()
6518 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) in nfsd4_process_open2()
6519 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; in nfsd4_process_open2()
6522 nfs4_put_stid(&dp->dl_stid); in nfsd4_process_open2()
6524 nfs4_put_stid(&stp->st_stid); in nfsd4_process_open2()
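/*
 * On success: an existing open stateid for this (openowner, file) pair
 * is upgraded in place, otherwise the preallocated open->op_stp is
 * initialized and hashed; the stateid is bumped and copied into
 * op_stateid, and a delegation is offered when policy allows.  If the
 * client asked for OPEN_XOR_DELEGATION and a delegation was granted,
 * the zero stateid is returned in place of an open stateid.
 */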
6532 if (open->op_openowner) in nfsd4_cleanup_open_state()
6533 nfs4_put_stateowner(&open->op_openowner->oo_owner); in nfsd4_cleanup_open_state()
6534 if (open->op_file) in nfsd4_cleanup_open_state()
6535 kmem_cache_free(file_slab, open->op_file); in nfsd4_cleanup_open_state()
6536 if (open->op_stp) in nfsd4_cleanup_open_state()
6537 nfs4_put_stid(&open->op_stp->st_stid); in nfsd4_cleanup_open_state()
6538 if (open->op_odstate) in nfsd4_cleanup_open_state()
6539 kmem_cache_free(odstate_slab, open->op_odstate); in nfsd4_cleanup_open_state()
6546 clientid_t *clid = &u->renew; in nfsd4_renew()
6555 clp = cstate->clp; in nfsd4_renew()
6556 if (!list_empty(&clp->cl_delegations) in nfsd4_renew()
6557 && clp->cl_cb_state != NFSD4_CB_UP) in nfsd4_renew()
6566 if (nn->grace_ended) in nfsd4_end_grace()
6570 nn->grace_ended = true; in nfsd4_end_grace()
6587 locks_end_grace(&nn->nfsd4_manager); in nfsd4_end_grace()
6601 time64_t double_grace_period_end = nn->boot_time + in clients_still_reclaiming()
6602 2 * nn->nfsd4_lease; in clients_still_reclaiming()
6604 if (nn->track_reclaim_completes && in clients_still_reclaiming()
6605 atomic_read(&nn->nr_reclaim_complete) == in clients_still_reclaiming()
6606 nn->reclaim_str_hashtbl_size) in clients_still_reclaiming()
6608 if (!nn->somebody_reclaimed) in clients_still_reclaiming()
6610 nn->somebody_reclaimed = false; in clients_still_reclaiming()
6629 if (last_refresh < lt->cutoff) in state_expired()
6631 time_remaining = last_refresh - lt->cutoff; in state_expired()
6632 lt->new_timeo = min(lt->new_timeo, time_remaining); in state_expired()
6639 spin_lock_init(&nn->nfsd_ssc_lock); in nfsd4_ssc_init_umount_work()
6640 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list); in nfsd4_ssc_init_umount_work()
6641 init_waitqueue_head(&nn->nfsd_ssc_waitq); in nfsd4_ssc_init_umount_work()
6646  * cleanup has been done, to destroy the ssc delayed unmount list.
6653 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6654 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { in nfsd4_ssc_shutdown_umount()
6655 list_del(&ni->nsui_list); in nfsd4_ssc_shutdown_umount()
6656 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6657 mntput(ni->nsui_vfsmount); in nfsd4_ssc_shutdown_umount()
6659 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6661 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_shutdown_umount()
6670 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6671 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { in nfsd4_ssc_expire_umount()
6672 if (time_after(jiffies, ni->nsui_expire)) { in nfsd4_ssc_expire_umount()
6673 if (refcount_read(&ni->nsui_refcnt) > 1) in nfsd4_ssc_expire_umount()
6677 ni->nsui_busy = true; in nfsd4_ssc_expire_umount()
6678 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6679 mntput(ni->nsui_vfsmount); in nfsd4_ssc_expire_umount()
6680 spin_lock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6683 list_del(&ni->nsui_list); in nfsd4_ssc_expire_umount()
6693 wake_up_all(&nn->nfsd_ssc_waitq); in nfsd4_ssc_expire_umount()
6694 spin_unlock(&nn->nfsd_ssc_lock); in nfsd4_ssc_expire_umount()
6706 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { in nfs4_lockowner_has_blockers()
6707 nf = stp->st_stid.sc_file; in nfs4_lockowner_has_blockers()
6708 ctx = locks_inode_context(nf->fi_inode); in nfs4_lockowner_has_blockers()
6724 if (atomic_read(&clp->cl_delegs_in_recall)) in nfs4_anylock_blockers()
6726 spin_lock(&clp->cl_lock); in nfs4_anylock_blockers()
6728 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i], in nfs4_anylock_blockers()
6730 if (so->so_is_open_owner) in nfs4_anylock_blockers()
6734 spin_unlock(&clp->cl_lock); in nfs4_anylock_blockers()
6739 spin_unlock(&clp->cl_lock); in nfs4_anylock_blockers()
6751 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ? in nfs4_get_client_reaplist()
6754 spin_lock(&nn->client_lock); in nfs4_get_client_reaplist()
6755 list_for_each_safe(pos, next, &nn->client_lru) { in nfs4_get_client_reaplist()
6757 if (clp->cl_state == NFSD4_EXPIRABLE) in nfs4_get_client_reaplist()
6759 if (!state_expired(lt, clp->cl_time)) in nfs4_get_client_reaplist()
6761 if (!atomic_read(&clp->cl_rpc_users)) { in nfs4_get_client_reaplist()
6762 if (clp->cl_state == NFSD4_ACTIVE) in nfs4_get_client_reaplist()
6763 atomic_inc(&nn->nfsd_courtesy_clients); in nfs4_get_client_reaplist()
6764 clp->cl_state = NFSD4_COURTESY; in nfs4_get_client_reaplist()
6773 list_add(&clp->cl_lru, reaplist); in nfs4_get_client_reaplist()
6777 spin_unlock(&nn->client_lock); in nfs4_get_client_reaplist()
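
The reaplist pass above is where an expired but idle client is demoted to a courtesy client instead of being destroyed outright. The sketch below is a deliberately coarse model of that lifecycle (in the kernel the promotion to EXPIRABLE actually happens in the lock and delegation conflict paths, not in this helper), intended only to show what the state names mean:

#include <stdio.h>

/* simplified client lifecycle used by the laundromat: an expired client
 * with no RPCs in flight is kept as a "courtesy" client; it is destroyed
 * later only if it gets in another client's way or the server needs the
 * slot back */
enum client_state { ACTIVE, COURTESY, EXPIRABLE };

static enum client_state laundromat_step(enum client_state s,
                                         int lease_expired,
                                         int rpcs_in_flight,
                                         int blocks_someone)
{
        if (s == ACTIVE && lease_expired && !rpcs_in_flight)
                s = COURTESY;
        if (s == COURTESY && blocks_someone)
                s = EXPIRABLE;      /* a later pass puts it on the reaplist */
        return s;
}

int main(void)
{
        enum client_state s = ACTIVE;

        s = laundromat_step(s, 1, 0, 0);   /* lease ran out -> COURTESY */
        printf("%d\n", s);
        s = laundromat_step(s, 1, 0, 1);   /* now conflicts -> EXPIRABLE */
        printf("%d\n", s);
        return 0;
}
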
6791 spin_lock(&nn->client_lock); in nfs4_get_courtesy_client_reaplist()
6792 list_for_each_safe(pos, next, &nn->client_lru) { in nfs4_get_courtesy_client_reaplist()
6794 if (clp->cl_state == NFSD4_ACTIVE) in nfs4_get_courtesy_client_reaplist()
6799 list_add(&clp->cl_lru, reaplist); in nfs4_get_courtesy_client_reaplist()
6803 spin_unlock(&nn->client_lock); in nfs4_get_courtesy_client_reaplist()
6814 trace_nfsd_clid_purged(&clp->cl_clientid); in nfs4_process_client_reaplist()
6815 list_del_init(&clp->cl_lru); in nfs4_process_client_reaplist()
6825 spin_lock(&nn->client_lock); in nfs40_clean_admin_revoked()
6826 if (nn->nfs40_last_revoke == 0 || in nfs40_clean_admin_revoked()
6827 nn->nfs40_last_revoke > lt->cutoff) { in nfs40_clean_admin_revoked()
6828 spin_unlock(&nn->client_lock); in nfs40_clean_admin_revoked()
6831 nn->nfs40_last_revoke = 0; in nfs40_clean_admin_revoked()
6834 list_for_each_entry(clp, &nn->client_lru, cl_lru) { in nfs40_clean_admin_revoked()
6838 if (atomic_read(&clp->cl_admin_revoked) == 0) in nfs40_clean_admin_revoked()
6841 spin_lock(&clp->cl_lock); in nfs40_clean_admin_revoked()
6842 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id) in nfs40_clean_admin_revoked()
6843 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { in nfs40_clean_admin_revoked()
6844 refcount_inc(&stid->sc_count); in nfs40_clean_admin_revoked()
6845 spin_unlock(&nn->client_lock); in nfs40_clean_admin_revoked()
6846 /* this function drops ->cl_lock */ in nfs40_clean_admin_revoked()
6849 spin_lock(&nn->client_lock); in nfs40_clean_admin_revoked()
6852 spin_unlock(&clp->cl_lock); in nfs40_clean_admin_revoked()
6854 spin_unlock(&nn->client_lock); in nfs40_clean_admin_revoked()
6866 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, in nfs4_laundromat()
6867 .new_timeo = nn->nfsd4_lease in nfs4_laundromat()
6879 spin_lock(&nn->s2s_cp_lock); in nfs4_laundromat()
6880 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { in nfs4_laundromat()
6882 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID && in nfs4_laundromat()
6883 state_expired(<, cps->cpntf_time)) in nfs4_laundromat()
6886 spin_unlock(&nn->s2s_cp_lock); in nfs4_laundromat()
6894 list_for_each_safe(pos, next, &nn->del_recall_lru) { in nfs4_laundromat()
6896 if (!state_expired(<, dp->dl_time)) in nfs4_laundromat()
6898 refcount_inc(&dp->dl_stid.sc_count); in nfs4_laundromat()
6900 list_add(&dp->dl_recall_lru, &reaplist); in nfs4_laundromat()
6906 list_del_init(&dp->dl_recall_lru); in nfs4_laundromat()
6910 spin_lock(&nn->client_lock); in nfs4_laundromat()
6911 while (!list_empty(&nn->close_lru)) { in nfs4_laundromat()
6912 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, in nfs4_laundromat()
6914 if (!state_expired(<, oo->oo_time)) in nfs4_laundromat()
6916 list_del_init(&oo->oo_close_lru); in nfs4_laundromat()
6917 stp = oo->oo_last_closed_stid; in nfs4_laundromat()
6918 oo->oo_last_closed_stid = NULL; in nfs4_laundromat()
6919 spin_unlock(&nn->client_lock); in nfs4_laundromat()
6920 nfs4_put_stid(&stp->st_stid); in nfs4_laundromat()
6921 spin_lock(&nn->client_lock); in nfs4_laundromat()
6923 spin_unlock(&nn->client_lock); in nfs4_laundromat()
6928 * So, we clean out any un-revisited request after a lease period in nfs4_laundromat()
6937 spin_lock(&nn->blocked_locks_lock); in nfs4_laundromat()
6938 while (!list_empty(&nn->blocked_locks_lru)) { in nfs4_laundromat()
6939 nbl = list_first_entry(&nn->blocked_locks_lru, in nfs4_laundromat()
6941 if (!state_expired(<, nbl->nbl_time)) in nfs4_laundromat()
6943 list_move(&nbl->nbl_lru, &reaplist); in nfs4_laundromat()
6944 list_del_init(&nbl->nbl_list); in nfs4_laundromat()
6946 spin_unlock(&nn->blocked_locks_lock); in nfs4_laundromat()
6951 list_del_init(&nbl->nbl_lru); in nfs4_laundromat()
6955 /* service the server-to-server copy delayed unmount list */ in nfs4_laundromat()
6975 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); in laundromat_main()
6993 spin_lock(&nn->client_lock); in deleg_reaper()
6994 list_for_each_safe(pos, next, &nn->client_lru) { in deleg_reaper()
6997 if (clp->cl_state != NFSD4_ACTIVE) in deleg_reaper()
6999 if (list_empty(&clp->cl_delegations)) in deleg_reaper()
7001 if (atomic_read(&clp->cl_delegs_in_recall)) in deleg_reaper()
7003 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags)) in deleg_reaper()
7005 if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) in deleg_reaper()
7007 if (clp->cl_cb_state != NFSD4_CB_UP) in deleg_reaper()
7011 kref_get(&clp->cl_nfsdfs.cl_ref); in deleg_reaper()
7012 clp->cl_ra_time = ktime_get_boottime_seconds(); in deleg_reaper()
7013 clp->cl_ra->ra_keep = 0; in deleg_reaper()
7014 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) | in deleg_reaper()
7016 trace_nfsd_cb_recall_any(clp->cl_ra); in deleg_reaper()
7017 nfsd4_run_cb(&clp->cl_ra->ra_cb); in deleg_reaper()
7019 spin_unlock(&nn->client_lock); in deleg_reaper()
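
deleg_reaper() only nudges a client with CB_RECALL_ANY when it is worth doing: the client must be active, actually hold delegations, have a working backchannel, have no recalls already in flight, and not have been asked within the last five seconds. A standalone predicate mirroring those checks (the field names here are simplified stand-ins for the cl_* fields read above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* toy view of the per-client fields deleg_reaper() consults before it
 * queues a CB_RECALL_ANY asking the client to return unused delegations */
struct client_view {
        bool active;             /* cl_state == NFSD4_ACTIVE */
        bool has_delegations;    /* !list_empty(cl_delegations) */
        int delegs_in_recall;    /* recalls already in flight */
        bool cb_running;         /* NFSD4_CALLBACK_RUNNING already set */
        bool cb_channel_up;      /* cl_cb_state == NFSD4_CB_UP */
        int64_t last_recall_any; /* cl_ra_time, boottime seconds */
};

/* returns true if this laundromat pass should send CB_RECALL_ANY */
static bool should_send_recall_any(const struct client_view *c, int64_t now)
{
        if (!c->active || !c->has_delegations)
                return false;
        if (c->delegs_in_recall || c->cb_running || !c->cb_channel_up)
                return false;
        if (now - c->last_recall_any < 5)   /* throttle: at most every 5s */
                return false;
        return true;
}

int main(void)
{
        struct client_view c = { true, true, 0, false, true, 100 };

        printf("%d %d\n", should_send_recall_any(&c, 103),   /* 0: too soon */
               should_send_recall_any(&c, 110));             /* 1: send it  */
        return 0;
}
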
7034 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) in nfs4_check_fh()
7045 if (stp->st_openstp) in nfs4_check_openmode()
7046 stp = stp->st_openstp; in nfs4_check_openmode()
7079 if (has_session && in->si_generation == 0) in check_stateid_generation()
7082 if (in->si_generation == ref->si_generation) in check_stateid_generation()
7090  * non-buggy client. For example, if the client sends a lock while some IO is outstanding, the lock may bump si_generation while the IO is still in flight. in check_stateid_generation()
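
check_stateid_generation() encodes three cases: generation zero on a sessions-based (v4.1+) request means "whatever is current", a generation newer than the server's is a client bug (BAD_STATEID), and an older one may simply be a reordered RPC, so it gets the retryable OLD_STATEID. A self-contained model of that comparison, using the protocol's error numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* status values standing in for nfs_ok / nfserr_* */
enum { NFS_OK = 0, ERR_OLD_STATEID = 10024, ERR_BAD_STATEID = 10025 };

/* model of the generation check: 0 means "use the current stateid" on
 * v4.1+ sessions, a newer-than-server generation is a client bug, and an
 * older one may be a reordered RPC, reported as OLD_STATEID so the client
 * can retry */
static int check_generation(uint32_t in, uint32_t ref, bool has_session)
{
        if (has_session && in == 0)
                return NFS_OK;
        if (in == ref)
                return NFS_OK;
        if ((int32_t)(in - ref) > 0)    /* "from the future" */
                return ERR_BAD_STATEID;
        return ERR_OLD_STATEID;
}

int main(void)
{
        printf("%d\n", check_generation(0, 7, true));   /* 0: special "current" */
        printf("%d\n", check_generation(5, 7, false));  /* 10024: old, retryable */
        printf("%d\n", check_generation(9, 7, false));  /* 10025: future, buggy */
        return 0;
}
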
7105 spin_lock(&s->sc_lock); in nfsd4_stid_check_stateid_generation()
7108 ret = check_stateid_generation(in, &s->sc_stateid, has_session); in nfsd4_stid_check_stateid_generation()
7109 spin_unlock(&s->sc_lock); in nfsd4_stid_check_stateid_generation()
7111 nfsd40_drop_revoked_stid(s->sc_client, in nfsd4_stid_check_stateid_generation()
7112 &s->sc_stateid); in nfsd4_stid_check_stateid_generation()
7118 if (ols->st_stateowner->so_is_open_owner && in nfsd4_check_openowner_confirmed()
7119 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) in nfsd4_check_openowner_confirmed()
7132 spin_lock(&cl->cl_lock); in nfsd4_validate_stateid()
7143 switch (s->sc_type) { in nfsd4_validate_stateid()
7152 printk("unknown stateid type %x\n", s->sc_type); in nfsd4_validate_stateid()
7156 spin_unlock(&cl->cl_lock); in nfsd4_validate_stateid()
7189 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); in nfsd4_lookup_stateid()
7191 if (cstate->session) in nfsd4_lookup_stateid()
7197 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask); in nfsd4_lookup_stateid()
7200 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) { in nfsd4_lookup_stateid()
7204 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { in nfsd4_lookup_stateid()
7205 nfsd40_drop_revoked_stid(cstate->clp, stateid); in nfsd4_lookup_stateid()
7218 if (!s || s->sc_status) in nfs4_find_file()
7221 switch (s->sc_type) { in nfs4_find_file()
7226 ret = find_readable_file(s->sc_file); in nfs4_find_file()
7228 ret = find_writeable_file(s->sc_file); in nfs4_find_file()
7255 status = nfsd_permission(&rqstp->rq_cred, in nfs4_check_file()
7256 fhp->fh_export, fhp->fh_dentry, in nfs4_check_file()
7274 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); in _free_cpntf_state_locked()
7275 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) in _free_cpntf_state_locked()
7277 list_del(&cps->cp_list); in _free_cpntf_state_locked()
7278 idr_remove(&nn->s2s_cp_stateids, in _free_cpntf_state_locked()
7279 cps->cp_stateid.cs_stid.si_opaque.so_id); in _free_cpntf_state_locked()
7294 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) in manage_cpntf_state()
7296 spin_lock(&nn->s2s_cp_lock); in manage_cpntf_state()
7297 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); in manage_cpntf_state()
7301 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { in manage_cpntf_state()
7306 refcount_inc(&state->cp_stateid.cs_count); in manage_cpntf_state()
7311 spin_unlock(&nn->s2s_cp_lock); in manage_cpntf_state()
7330 cps->cpntf_time = ktime_get_boottime_seconds(); in find_cpntf_state()
7333 found = lookup_clientid(&cps->cp_p_clid, true, nn); in find_cpntf_state()
7337 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, in find_cpntf_state()
7353 spin_lock(&nn->s2s_cp_lock); in nfs4_put_cpntf_state()
7355 spin_unlock(&nn->s2s_cp_lock); in nfs4_put_cpntf_state()
7359 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
7403 switch (s->sc_type) { in nfs4_preprocess_stateid_op()
7436 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; in nfsd4_test_stateid()
7438 struct nfs4_client *cl = cstate->clp; in nfsd4_test_stateid()
7440 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) in nfsd4_test_stateid()
7441 stateid->ts_id_status = in nfsd4_test_stateid()
7442 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); in nfsd4_test_stateid()
7457 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); in nfsd4_free_lock_stateid()
7462 if (check_for_locks(stp->st_stid.sc_file, in nfsd4_free_lock_stateid()
7463 lockowner(stp->st_stateowner))) in nfsd4_free_lock_stateid()
7470 mutex_unlock(&stp->st_mutex); in nfsd4_free_lock_stateid()
7480 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; in nfsd4_free_stateid()
7481 stateid_t *stateid = &free_stateid->fr_stateid; in nfsd4_free_stateid()
7484 struct nfs4_client *cl = cstate->clp; in nfsd4_free_stateid()
7487 spin_lock(&cl->cl_lock); in nfsd4_free_stateid()
7489 if (!s || s->sc_status & SC_STATUS_CLOSED) in nfsd4_free_stateid()
7491 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) { in nfsd4_free_stateid()
7496 spin_lock(&s->sc_lock); in nfsd4_free_stateid()
7497 switch (s->sc_type) { in nfsd4_free_stateid()
7499 if (s->sc_status & SC_STATUS_REVOKED) { in nfsd4_free_stateid()
7500 s->sc_status |= SC_STATUS_CLOSED; in nfsd4_free_stateid()
7501 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
7503 if (s->sc_status & SC_STATUS_FREEABLE) in nfsd4_free_stateid()
7504 list_del_init(&dp->dl_recall_lru); in nfsd4_free_stateid()
7505 s->sc_status |= SC_STATUS_FREED; in nfsd4_free_stateid()
7506 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
7514 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); in nfsd4_free_stateid()
7520 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
7521 refcount_inc(&s->sc_count); in nfsd4_free_stateid()
7522 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
7526 spin_unlock(&s->sc_lock); in nfsd4_free_stateid()
7528 spin_unlock(&cl->cl_lock); in nfsd4_free_stateid()
7542 struct svc_fh *current_fh = &cstate->current_fh; in nfs4_seqid_op_checks()
7543 struct nfs4_stateowner *sop = stp->st_stateowner; in nfs4_seqid_op_checks()
7552 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); in nfs4_seqid_op_checks()
7554 status = nfs4_check_fh(current_fh, &stp->st_stid); in nfs4_seqid_op_checks()
7556 mutex_unlock(&stp->st_mutex); in nfs4_seqid_op_checks()
7561 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
7594 if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) { in nfs4_preprocess_seqid_op()
7595 nfs4_put_stateowner(stp->st_stateowner); in nfs4_preprocess_seqid_op()
7603 nfs4_put_stid(&stp->st_stid); in nfs4_preprocess_seqid_op()
7618 oo = openowner(stp->st_stateowner); in nfs4_preprocess_confirmed_seqid_op()
7619 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { in nfs4_preprocess_confirmed_seqid_op()
7620 mutex_unlock(&stp->st_mutex); in nfs4_preprocess_confirmed_seqid_op()
7621 nfs4_put_stid(&stp->st_stid); in nfs4_preprocess_confirmed_seqid_op()
7632 struct nfsd4_open_confirm *oc = &u->open_confirm; in nfsd4_open_confirm()
7639 cstate->current_fh.fh_dentry); in nfsd4_open_confirm()
7641 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); in nfsd4_open_confirm()
7646 oc->oc_seqid, &oc->oc_req_stateid, in nfsd4_open_confirm()
7650 oo = openowner(stp->st_stateowner); in nfsd4_open_confirm()
7652 if (oo->oo_flags & NFS4_OO_CONFIRMED) { in nfsd4_open_confirm()
7653 mutex_unlock(&stp->st_mutex); in nfsd4_open_confirm()
7656 oo->oo_flags |= NFS4_OO_CONFIRMED; in nfsd4_open_confirm()
7657 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); in nfsd4_open_confirm()
7658 mutex_unlock(&stp->st_mutex); in nfsd4_open_confirm()
7659 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); in nfsd4_open_confirm()
7660 nfsd4_client_record_create(oo->oo_owner.so_client); in nfsd4_open_confirm()
7663 nfs4_put_stid(&stp->st_stid); in nfsd4_open_confirm()
7673 nfs4_file_put_access(stp->st_stid.sc_file, access); in nfs4_stateid_downgrade_bit()
7699 struct nfsd4_open_downgrade *od = &u->open_downgrade; in nfsd4_open_downgrade()
7705 cstate->current_fh.fh_dentry); in nfsd4_open_downgrade()
7708 if (od->od_deleg_want) in nfsd4_open_downgrade()
7710 od->od_deleg_want); in nfsd4_open_downgrade()
7712 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, in nfsd4_open_downgrade()
7713 &od->od_stateid, &stp, nn); in nfsd4_open_downgrade()
7717 if (!test_access(od->od_share_access, stp)) { in nfsd4_open_downgrade()
7719 stp->st_access_bmap, od->od_share_access); in nfsd4_open_downgrade()
7722 if (!test_deny(od->od_share_deny, stp)) { in nfsd4_open_downgrade()
7724 stp->st_deny_bmap, od->od_share_deny); in nfsd4_open_downgrade()
7727 nfs4_stateid_downgrade(stp, od->od_share_access); in nfsd4_open_downgrade()
7728 reset_union_bmap_deny(od->od_share_deny, stp); in nfsd4_open_downgrade()
7729 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); in nfsd4_open_downgrade()
7732 mutex_unlock(&stp->st_mutex); in nfsd4_open_downgrade()
7733 nfs4_put_stid(&stp->st_stid); in nfsd4_open_downgrade()
7741 struct nfs4_client *clp = s->st_stid.sc_client; in nfsd4_close_open_stateid()
7746 spin_lock(&clp->cl_lock); in nfsd4_close_open_stateid()
7749 if (clp->cl_minorversion) { in nfsd4_close_open_stateid()
7752 spin_unlock(&clp->cl_lock); in nfsd4_close_open_stateid()
7754 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); in nfsd4_close_open_stateid()
7758 spin_unlock(&clp->cl_lock); in nfsd4_close_open_stateid()
7771 struct nfsd4_close *close = &u->close; in nfsd4_close()
7779 cstate->current_fh.fh_dentry); in nfsd4_close()
7781 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, in nfsd4_close()
7782 &close->cl_stateid, in nfsd4_close()
7789 spin_lock(&stp->st_stid.sc_client->cl_lock); in nfsd4_close()
7790 stp->st_stid.sc_status |= SC_STATUS_CLOSED; in nfsd4_close()
7791 spin_unlock(&stp->st_stid.sc_client->cl_lock); in nfsd4_close()
7799 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); in nfsd4_close()
7802 mutex_unlock(&stp->st_mutex); in nfsd4_close()
7813 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); in nfsd4_close()
7816 nfs4_put_stid(&stp->st_stid); in nfsd4_close()
7825 struct nfsd4_delegreturn *dr = &u->delegreturn; in nfsd4_delegreturn()
7827 stateid_t *stateid = &dr->dr_stateid; in nfsd4_delegreturn()
7832 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) in nfsd4_delegreturn()
7839 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); in nfsd4_delegreturn()
7846 wake_up_var(d_inode(cstate->current_fh.fh_dentry)); in nfsd4_delegreturn()
7848 nfs4_put_stid(&dp->dl_stid); in nfsd4_delegreturn()
7853 /* last octet in a range */
7861 return end > start ? end - 1: NFS4_MAX_UINT64; in last_byte_offset()
7865 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7866 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7867 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7868  * locking, this prevents us from being completely protocol-compliant. The real solution to this problem is to start using unsigned file offsets in the VFS, but this is a very deep change! in nfs4_transform_lock_offset()
7875 if (lock->fl_start < 0) in nfs4_transform_lock_offset()
7876 lock->fl_start = OFFSET_MAX; in nfs4_transform_lock_offset()
7877 if (lock->fl_end < 0) in nfs4_transform_lock_offset()
7878 lock->fl_end = OFFSET_MAX; in nfs4_transform_lock_offset()
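
The helpers above translate an NFSv4 (offset, length) lock range into the inclusive [fl_start, fl_end] range the VFS uses; a length of NFS4_MAX_UINT64 means "to end of file" and makes the addition wrap, which is exactly what the end > start test catches (a zero length is rejected earlier by check_lock_length()). A small worked example of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 (~0ULL)

/* NFSv4 expresses a lock as (offset, length); the VFS wants the inclusive
 * last byte, which is offset + length - 1 unless the addition wraps */
static uint64_t last_byte_offset(uint64_t start, uint64_t len)
{
        uint64_t end = start + len;     /* may wrap on "lock to EOF" */

        return end > start ? end - 1 : NFS4_MAX_UINT64;
}

int main(void)
{
        /* 100 bytes starting at 4096 -> inclusive end 4195 */
        printf("%llu\n", (unsigned long long)last_byte_offset(4096, 100));
        /* lock-to-EOF from offset 1 wraps, so the sentinel end is returned */
        printf("%llu\n", (unsigned long long)last_byte_offset(1, NFS4_MAX_UINT64));
        return 0;
}
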
7886 nfs4_get_stateowner(&lo->lo_owner); in nfsd4_lm_get_owner()
7896 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_lm_put_owner()
7903 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner; in nfsd4_lm_lock_expirable()
7904 struct nfs4_client *clp = lo->lo_owner.so_client; in nfsd4_lm_lock_expirable()
7908 nn = net_generic(clp->net, nfsd_net_id); in nfsd4_lm_lock_expirable()
7909 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); in nfsd4_lm_lock_expirable()
7925 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner; in nfsd4_lm_notify()
7926 struct net *net = lo->lo_owner.so_client->net; in nfsd4_lm_notify()
7933 spin_lock(&nn->blocked_locks_lock); in nfsd4_lm_notify()
7934 if (!list_empty(&nbl->nbl_list)) { in nfsd4_lm_notify()
7935 list_del_init(&nbl->nbl_list); in nfsd4_lm_notify()
7936 list_del_init(&nbl->nbl_lru); in nfsd4_lm_notify()
7939 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lm_notify()
7943 nfsd4_try_run_cb(&nbl->nbl_cb); in nfsd4_lm_notify()
7961 if (fl->fl_lmops == &nfsd_posix_mng_ops) { in nfs4_set_lock_denied()
7962 lo = (struct nfs4_lockowner *) fl->c.flc_owner; in nfs4_set_lock_denied()
7963 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, in nfs4_set_lock_denied()
7965 if (!deny->ld_owner.data) in nfs4_set_lock_denied()
7968 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; in nfs4_set_lock_denied()
7971 deny->ld_owner.len = 0; in nfs4_set_lock_denied()
7972 deny->ld_owner.data = NULL; in nfs4_set_lock_denied()
7973 deny->ld_clientid.cl_boot = 0; in nfs4_set_lock_denied()
7974 deny->ld_clientid.cl_id = 0; in nfs4_set_lock_denied()
7976 deny->ld_start = fl->fl_start; in nfs4_set_lock_denied()
7977 deny->ld_length = NFS4_MAX_UINT64; in nfs4_set_lock_denied()
7978 if (fl->fl_end != NFS4_MAX_UINT64) in nfs4_set_lock_denied()
7979 deny->ld_length = fl->fl_end - fl->fl_start + 1; in nfs4_set_lock_denied()
7980 deny->ld_type = NFS4_READ_LT; in nfs4_set_lock_denied()
7981 if (fl->c.flc_type != F_RDLCK) in nfs4_set_lock_denied()
7982 deny->ld_type = NFS4_WRITE_LT; in nfs4_set_lock_denied()
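
nfs4_set_lock_denied() performs the reverse mapping when reporting a conflicting lock: the inclusive VFS range is turned back into an (offset, length) pair, with NFS4_MAX_UINT64 again standing for "to end of file". A tiny sketch of just that conversion (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 (~0ULL)

/* inverse of last_byte_offset(): turn an inclusive VFS range back into
 * the (offset, length) pair reported in a LOCK4denied result */
static void denied_range(uint64_t fl_start, uint64_t fl_end,
                         uint64_t *offset, uint64_t *length)
{
        *offset = fl_start;
        *length = NFS4_MAX_UINT64;              /* "to end of file" */
        if (fl_end != NFS4_MAX_UINT64)
                *length = fl_end - fl_start + 1;
}

int main(void)
{
        uint64_t off, len;

        denied_range(4096, 4195, &off, &len);   /* offset=4096 length=100 */
        printf("offset=%llu length=%llu\n",
               (unsigned long long)off, (unsigned long long)len);
        return 0;
}
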
7991 lockdep_assert_held(&clp->cl_lock); in find_lockowner_str_locked()
7993 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], in find_lockowner_str_locked()
7995 if (so->so_is_open_owner) in find_lockowner_str_locked()
8008 spin_lock(&clp->cl_lock); in find_lockowner_str()
8010 spin_unlock(&clp->cl_lock); in find_lockowner_str()
8033  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have occurred. in alloc_init_lock_stateowner()
8045 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); in alloc_init_lock_stateowner()
8048 INIT_LIST_HEAD(&lo->lo_blocked); in alloc_init_lock_stateowner()
8049 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); in alloc_init_lock_stateowner()
8050 lo->lo_owner.so_is_open_owner = 0; in alloc_init_lock_stateowner()
8051 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; in alloc_init_lock_stateowner()
8052 lo->lo_owner.so_ops = &lockowner_ops; in alloc_init_lock_stateowner()
8053 spin_lock(&clp->cl_lock); in alloc_init_lock_stateowner()
8054 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); in alloc_init_lock_stateowner()
8056 list_add(&lo->lo_owner.so_strhash, in alloc_init_lock_stateowner()
8057 &clp->cl_ownerstr_hashtbl[strhashval]); in alloc_init_lock_stateowner()
8060 nfs4_free_stateowner(&lo->lo_owner); in alloc_init_lock_stateowner()
8062 spin_unlock(&clp->cl_lock); in alloc_init_lock_stateowner()
8072 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); in find_lock_stateid()
8074 /* If ost is not hashed, ost->st_locks will not be valid */ in find_lock_stateid()
8076 list_for_each_entry(lst, &ost->st_locks, st_locks) { in find_lock_stateid()
8077 if (lst->st_stateowner == &lo->lo_owner) { in find_lock_stateid()
8078 refcount_inc(&lst->st_stid.sc_count); in find_lock_stateid()
8090 struct nfs4_client *clp = lo->lo_owner.so_client; in init_lock_stateid()
8093 mutex_init(&stp->st_mutex); in init_lock_stateid()
8094 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); in init_lock_stateid()
8096 spin_lock(&clp->cl_lock); in init_lock_stateid()
8102 refcount_inc(&stp->st_stid.sc_count); in init_lock_stateid()
8103 stp->st_stid.sc_type = SC_TYPE_LOCK; in init_lock_stateid()
8104 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); in init_lock_stateid()
8106 stp->st_stid.sc_file = fp; in init_lock_stateid()
8107 stp->st_access_bmap = 0; in init_lock_stateid()
8108 stp->st_deny_bmap = open_stp->st_deny_bmap; in init_lock_stateid()
8109 stp->st_openstp = open_stp; in init_lock_stateid()
8110 spin_lock(&fp->fi_lock); in init_lock_stateid()
8111 list_add(&stp->st_locks, &open_stp->st_locks); in init_lock_stateid()
8112 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); in init_lock_stateid()
8113 list_add(&stp->st_perfile, &fp->fi_stateids); in init_lock_stateid()
8114 spin_unlock(&fp->fi_lock); in init_lock_stateid()
8115 spin_unlock(&clp->cl_lock); in init_lock_stateid()
8118 spin_unlock(&clp->cl_lock); in init_lock_stateid()
8120 nfs4_put_stid(&retstp->st_stid); in init_lock_stateid()
8124 mutex_unlock(&stp->st_mutex); in init_lock_stateid()
8127 spin_unlock(&clp->cl_lock); in init_lock_stateid()
8128 mutex_unlock(&stp->st_mutex); in init_lock_stateid()
8139 struct nfs4_openowner *oo = openowner(ost->st_stateowner); in find_or_create_lock_stateid()
8140 struct nfs4_client *clp = oo->oo_owner.so_client; in find_or_create_lock_stateid()
8143 spin_lock(&clp->cl_lock); in find_or_create_lock_stateid()
8145 spin_unlock(&clp->cl_lock); in find_or_create_lock_stateid()
8149 nfs4_put_stid(&lst->st_stid); in find_or_create_lock_stateid()
8173 struct nfs4_file *fp = lock_stp->st_stid.sc_file; in get_lock_access()
8175 lockdep_assert_held(&fp->fi_lock); in get_lock_access()
8190 struct nfs4_file *fi = ost->st_stid.sc_file; in lookup_or_create_lock_state()
8191 struct nfs4_openowner *oo = openowner(ost->st_stateowner); in lookup_or_create_lock_state()
8192 struct nfs4_client *cl = oo->oo_owner.so_client; in lookup_or_create_lock_state()
8193 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); in lookup_or_create_lock_state()
8198 lo = find_lockowner_str(cl, &lock->lk_new_owner); in lookup_or_create_lock_state()
8200 strhashval = ownerstr_hashval(&lock->lk_new_owner); in lookup_or_create_lock_state()
8207 if (!cstate->minorversion && in lookup_or_create_lock_state()
8208 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) in lookup_or_create_lock_state()
8221 nfs4_put_stateowner(&lo->lo_owner); in lookup_or_create_lock_state()
8232 struct nfsd4_lock *lock = &u->lock; in nfsd4_lock()
8252 (long long) lock->lk_offset, in nfsd4_lock()
8253 (long long) lock->lk_length); in nfsd4_lock()
8255 if (check_lock_length(lock->lk_offset, lock->lk_length)) in nfsd4_lock()
8258 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); in nfsd4_lock()
8261 if (exportfs_cannot_lock(cstate->current_fh.fh_dentry->d_sb->s_export_op)) { in nfsd4_lock()
8266 if (lock->lk_is_new) { in nfsd4_lock()
8269 memcpy(&lock->lk_new_clientid, in nfsd4_lock()
8270 &cstate->clp->cl_clientid, in nfsd4_lock()
8275 lock->lk_new_open_seqid, in nfsd4_lock()
8276 &lock->lk_new_open_stateid, in nfsd4_lock()
8280 mutex_unlock(&open_stp->st_mutex); in nfsd4_lock()
8281 open_sop = openowner(open_stp->st_stateowner); in nfsd4_lock()
8283 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, in nfsd4_lock()
8284 &lock->lk_new_clientid)) in nfsd4_lock()
8290 lock->lk_old_lock_seqid, in nfsd4_lock()
8291 &lock->lk_old_lock_stateid, in nfsd4_lock()
8297 lock_sop = lockowner(lock_stp->st_stateowner); in nfsd4_lock()
8299 lkflg = setlkflg(lock->lk_type); in nfsd4_lock()
8305 if (locks_in_grace(net) && !lock->lk_reclaim) in nfsd4_lock()
8308 if (!locks_in_grace(net) && lock->lk_reclaim) in nfsd4_lock()
8311 if (lock->lk_reclaim) in nfsd4_lock()
8314 fp = lock_stp->st_stid.sc_file; in nfsd4_lock()
8315 switch (lock->lk_type) { in nfsd4_lock()
8319 spin_lock(&fp->fi_lock); in nfsd4_lock()
8323 spin_unlock(&fp->fi_lock); in nfsd4_lock()
8329 spin_lock(&fp->fi_lock); in nfsd4_lock()
8333 spin_unlock(&fp->fi_lock); in nfsd4_lock()
8346 if (lock->lk_type & (NFS4_READW_LT | NFS4_WRITEW_LT) && in nfsd4_lock()
8348 locks_can_async_lock(nf->nf_file->f_op)) in nfsd4_lock()
8351 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); in nfsd4_lock()
8358 file_lock = &nbl->nbl_lock; in nfsd4_lock()
8359 file_lock->c.flc_type = type; in nfsd4_lock()
8360 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); in nfsd4_lock()
8361 file_lock->c.flc_pid = current->tgid; in nfsd4_lock()
8362 file_lock->c.flc_file = nf->nf_file; in nfsd4_lock()
8363 file_lock->c.flc_flags = flags; in nfsd4_lock()
8364 file_lock->fl_lmops = &nfsd_posix_mng_ops; in nfsd4_lock()
8365 file_lock->fl_start = lock->lk_offset; in nfsd4_lock()
8366 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); in nfsd4_lock()
8377 nbl->nbl_time = ktime_get_boottime_seconds(); in nfsd4_lock()
8378 spin_lock(&nn->blocked_locks_lock); in nfsd4_lock()
8379 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); in nfsd4_lock()
8380 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); in nfsd4_lock()
8381 kref_get(&nbl->nbl_kref); in nfsd4_lock()
8382 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lock()
8385 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); in nfsd4_lock()
8388 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); in nfsd4_lock()
8390 if (lock->lk_reclaim) in nfsd4_lock()
8391 nn->somebody_reclaimed = true; in nfsd4_lock()
8394 kref_put(&nbl->nbl_kref, free_nbl); in nfsd4_lock()
8397 case -EAGAIN: /* conflock holds conflicting lock */ in nfsd4_lock()
8400 nfs4_set_lock_denied(conflock, &lock->lk_denied); in nfsd4_lock()
8402 case -EDEADLK: in nfsd4_lock()
8414 spin_lock(&nn->blocked_locks_lock); in nfsd4_lock()
8415 if (!list_empty(&nbl->nbl_list) && in nfsd4_lock()
8416 !list_empty(&nbl->nbl_lru)) { in nfsd4_lock()
8417 list_del_init(&nbl->nbl_list); in nfsd4_lock()
8418 list_del_init(&nbl->nbl_lru); in nfsd4_lock()
8419 kref_put(&nbl->nbl_kref, free_nbl); in nfsd4_lock()
8422 spin_unlock(&nn->blocked_locks_lock); in nfsd4_lock()
8430 if (cstate->replay_owner && in nfsd4_lock()
8431 cstate->replay_owner != &lock_sop->lo_owner && in nfsd4_lock()
8433 lock_sop->lo_owner.so_seqid++; in nfsd4_lock()
8436  * If this is a new, never-before-used stateid, and we are returning an error, then just go ahead and release it. in nfsd4_lock()
8442 mutex_unlock(&lock_stp->st_mutex); in nfsd4_lock()
8444 nfs4_put_stid(&lock_stp->st_stid); in nfsd4_lock()
8447 nfs4_put_stid(&open_stp->st_stid); in nfsd4_lock()
8456 struct nfsd4_lock *lock = &u->lock; in nfsd4_lock_release()
8457 struct nfsd4_lock_denied *deny = &lock->lk_denied; in nfsd4_lock_release()
8459 kfree(deny->ld_owner.data); in nfsd4_lock_release()
8476 inode = fhp->fh_dentry->d_inode; in nfsd_test_lock()
8481 lock->c.flc_file = nf->nf_file; in nfsd_test_lock()
8482 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); in nfsd_test_lock()
8483 lock->c.flc_file = NULL; in nfsd_test_lock()
8497 struct nfsd4_lockt *lockt = &u->lockt; in nfsd4_lockt()
8506 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) in nfsd4_lockt()
8510 status = set_client(&lockt->lt_clientid, cstate, nn); in nfsd4_lockt()
8515 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) in nfsd4_lockt()
8525 switch (lockt->lt_type) { in nfsd4_lockt()
8528 file_lock->c.flc_type = F_RDLCK; in nfsd4_lockt()
8532 file_lock->c.flc_type = F_WRLCK; in nfsd4_lockt()
8540 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); in nfsd4_lockt()
8542 file_lock->c.flc_owner = (fl_owner_t)lo; in nfsd4_lockt()
8543 file_lock->c.flc_pid = current->tgid; in nfsd4_lockt()
8544 file_lock->c.flc_flags = FL_POSIX; in nfsd4_lockt()
8546 file_lock->fl_start = lockt->lt_offset; in nfsd4_lockt()
8547 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); in nfsd4_lockt()
8551 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); in nfsd4_lockt()
8555 if (file_lock->c.flc_type != F_UNLCK) { in nfsd4_lockt()
8557 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); in nfsd4_lockt()
8561 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_lockt()
8569 struct nfsd4_lockt *lockt = &u->lockt; in nfsd4_lockt_release()
8570 struct nfsd4_lock_denied *deny = &lockt->lt_denied; in nfsd4_lockt_release()
8572 kfree(deny->ld_owner.data); in nfsd4_lockt_release()
8579 struct nfsd4_locku *locku = &u->locku; in nfsd4_locku()
8588 (long long) locku->lu_offset, in nfsd4_locku()
8589 (long long) locku->lu_length); in nfsd4_locku()
8591 if (check_lock_length(locku->lu_offset, locku->lu_length)) in nfsd4_locku()
8594 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, in nfsd4_locku()
8595 &locku->lu_stateid, SC_TYPE_LOCK, 0, in nfsd4_locku()
8599 nf = find_any_file(stp->st_stid.sc_file); in nfsd4_locku()
8604 if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) { in nfsd4_locku()
8616 file_lock->c.flc_type = F_UNLCK; in nfsd4_locku()
8617 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); in nfsd4_locku()
8618 file_lock->c.flc_pid = current->tgid; in nfsd4_locku()
8619 file_lock->c.flc_file = nf->nf_file; in nfsd4_locku()
8620 file_lock->c.flc_flags = FL_POSIX; in nfsd4_locku()
8621 file_lock->fl_lmops = &nfsd_posix_mng_ops; in nfsd4_locku()
8622 file_lock->fl_start = locku->lu_offset; in nfsd4_locku()
8624 file_lock->fl_end = last_byte_offset(locku->lu_offset, in nfsd4_locku()
8625 locku->lu_length); in nfsd4_locku()
8628 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); in nfsd4_locku()
8633 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); in nfsd4_locku()
8637 mutex_unlock(&stp->st_mutex); in nfsd4_locku()
8638 nfs4_put_stid(&stp->st_stid); in nfsd4_locku()
8664 spin_lock(&fp->fi_lock); in check_for_locks()
8672 inode = file_inode(nf->nf_file); in check_for_locks()
8675 if (flctx && !list_empty_careful(&flctx->flc_posix)) { in check_for_locks()
8676 spin_lock(&flctx->flc_lock); in check_for_locks()
8677 for_each_file_lock(fl, &flctx->flc_posix) { in check_for_locks()
8678 if (fl->c.flc_owner == (fl_owner_t)lowner) { in check_for_locks()
8683 spin_unlock(&flctx->flc_lock); in check_for_locks()
8686 spin_unlock(&fp->fi_lock); in check_for_locks()
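
check_for_locks() answers one question: does this lock-owner still hold any POSIX lock on the file? RELEASE_LOCKOWNER and FREE_STATEID rely on it to refuse to tear down state while locks remain (NFS4ERR_LOCKS_HELD). A toy standalone version of that walk, with plain pointers standing in for the flc_posix list and the nfs4_lockowner:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* toy lock record: in the kernel this is a struct file_lock on the inode's
 * flc_posix list, and the owner field is the nfs4_lockowner pointer */
struct posix_lock {
        const void *owner;
        long long start, end;
};

/* model of check_for_locks(): does @owner still hold any POSIX lock? */
static bool owner_has_locks(const struct posix_lock *locks, size_t n,
                            const void *owner)
{
        for (size_t i = 0; i < n; i++)
                if (locks[i].owner == owner)
                        return true;
        return false;
}

int main(void)
{
        int owner_a, owner_b;
        struct posix_lock locks[] = {
                { &owner_a, 0, 4095 },
                { &owner_a, 8192, 12287 },
        };

        /* RELEASE_LOCKOWNER must fail (NFS4ERR_LOCKS_HELD) for owner_a */
        printf("%d %d\n", owner_has_locks(locks, 2, &owner_a),
               owner_has_locks(locks, 2, &owner_b));
        return 0;
}
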
8691 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
8710 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; in nfsd4_release_lockowner()
8712 clientid_t *clid = &rlockowner->rl_clientid; in nfsd4_release_lockowner()
8720 clid->cl_boot, clid->cl_id); in nfsd4_release_lockowner()
8725 clp = cstate->clp; in nfsd4_release_lockowner()
8727 spin_lock(&clp->cl_lock); in nfsd4_release_lockowner()
8728 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner); in nfsd4_release_lockowner()
8730 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8734 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { in nfsd4_release_lockowner()
8735 if (check_for_locks(stp->st_stid.sc_file, lo)) { in nfsd4_release_lockowner()
8736 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8737 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_release_lockowner()
8742 while (!list_empty(&lo->lo_owner.so_stateids)) { in nfsd4_release_lockowner()
8743 stp = list_first_entry(&lo->lo_owner.so_stateids, in nfsd4_release_lockowner()
8749 spin_unlock(&clp->cl_lock); in nfsd4_release_lockowner()
8753 nfs4_put_stateowner(&lo->lo_owner); in nfsd4_release_lockowner()
8769 return (crp && crp->cr_clp); in nfs4_has_reclaimed_state()
8788 INIT_LIST_HEAD(&crp->cr_strhash); in nfs4_client_to_reclaim()
8789 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); in nfs4_client_to_reclaim()
8790 crp->cr_name.data = name.data; in nfs4_client_to_reclaim()
8791 crp->cr_name.len = name.len; in nfs4_client_to_reclaim()
8792 crp->cr_princhash.data = princhash.data; in nfs4_client_to_reclaim()
8793 crp->cr_princhash.len = princhash.len; in nfs4_client_to_reclaim()
8794 crp->cr_clp = NULL; in nfs4_client_to_reclaim()
8795 nn->reclaim_str_hashtbl_size++; in nfs4_client_to_reclaim()
8803 list_del(&crp->cr_strhash); in nfs4_remove_reclaim_record()
8804 kfree(crp->cr_name.data); in nfs4_remove_reclaim_record()
8805 kfree(crp->cr_princhash.data); in nfs4_remove_reclaim_record()
8807 nn->reclaim_str_hashtbl_size--; in nfs4_remove_reclaim_record()
8817 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { in nfs4_release_reclaim()
8818 crp = list_entry(nn->reclaim_str_hashtbl[i].next, in nfs4_release_reclaim()
8823 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); in nfs4_release_reclaim()
8835 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { in nfsd4_find_reclaim_client()
8836 if (compare_blob(&crp->cr_name, &name) == 0) { in nfsd4_find_reclaim_client()
8846 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) in nfs4_check_open_reclaim()
8873 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); in set_max_delegations()
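
The shift above sizes the delegation limit from available page-cache memory. A worked example of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT of 12): the shift becomes 20 - 2 - 12 = 6, so every 64 free buffer pages (256 KiB) permit one delegation, roughly four delegations per megabyte:

#include <stdio.h>

int main(void)
{
        unsigned long free_buffer_pages = 1UL << 18;    /* pretend: 1 GiB of 4K pages */
        int page_shift = 12;                            /* 4 KiB pages */
        unsigned long max_delegations =
                free_buffer_pages >> (20 - 2 - page_shift);

        /* 262144 pages >> 6 == 4096, i.e. 4 delegations per MiB */
        printf("max_delegations = %lu\n", max_delegations);
        return 0;
}
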
8881 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, in nfs4_state_create_net()
8884 if (!nn->conf_id_hashtbl) in nfs4_state_create_net()
8886 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, in nfs4_state_create_net()
8889 if (!nn->unconf_id_hashtbl) in nfs4_state_create_net()
8891 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, in nfs4_state_create_net()
8894 if (!nn->sessionid_hashtbl) in nfs4_state_create_net()
8898 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); in nfs4_state_create_net()
8899 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); in nfs4_state_create_net()
8902 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); in nfs4_state_create_net()
8903 nn->conf_name_tree = RB_ROOT; in nfs4_state_create_net()
8904 nn->unconf_name_tree = RB_ROOT; in nfs4_state_create_net()
8905 nn->boot_time = ktime_get_real_seconds(); in nfs4_state_create_net()
8906 nn->grace_ended = false; in nfs4_state_create_net()
8907 nn->nfsd4_manager.block_opens = true; in nfs4_state_create_net()
8908 INIT_LIST_HEAD(&nn->nfsd4_manager.list); in nfs4_state_create_net()
8909 INIT_LIST_HEAD(&nn->client_lru); in nfs4_state_create_net()
8910 INIT_LIST_HEAD(&nn->close_lru); in nfs4_state_create_net()
8911 INIT_LIST_HEAD(&nn->del_recall_lru); in nfs4_state_create_net()
8912 spin_lock_init(&nn->client_lock); in nfs4_state_create_net()
8913 spin_lock_init(&nn->s2s_cp_lock); in nfs4_state_create_net()
8914 idr_init(&nn->s2s_cp_stateids); in nfs4_state_create_net()
8915 atomic_set(&nn->pending_async_copies, 0); in nfs4_state_create_net()
8917 spin_lock_init(&nn->blocked_locks_lock); in nfs4_state_create_net()
8918 INIT_LIST_HEAD(&nn->blocked_locks_lru); in nfs4_state_create_net()
8920 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); in nfs4_state_create_net()
8921 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); in nfs4_state_create_net()
8924 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client"); in nfs4_state_create_net()
8925 if (!nn->nfsd_client_shrinker) in nfs4_state_create_net()
8928 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan; in nfs4_state_create_net()
8929 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count; in nfs4_state_create_net()
8930 nn->nfsd_client_shrinker->private_data = nn; in nfs4_state_create_net()
8932 shrinker_register(nn->nfsd_client_shrinker); in nfs4_state_create_net()
8938 kfree(nn->sessionid_hashtbl); in nfs4_state_create_net()
8940 kfree(nn->unconf_id_hashtbl); in nfs4_state_create_net()
8942 kfree(nn->conf_id_hashtbl); in nfs4_state_create_net()
8944 return -ENOMEM; in nfs4_state_create_net()
8955 while (!list_empty(&nn->conf_id_hashtbl[i])) { in nfs4_state_destroy_net()
8956 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); in nfs4_state_destroy_net()
8961 WARN_ON(!list_empty(&nn->blocked_locks_lru)); in nfs4_state_destroy_net()
8964 while (!list_empty(&nn->unconf_id_hashtbl[i])) { in nfs4_state_destroy_net()
8965 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); in nfs4_state_destroy_net()
8970 kfree(nn->sessionid_hashtbl); in nfs4_state_destroy_net()
8971 kfree(nn->unconf_id_hashtbl); in nfs4_state_destroy_net()
8972 kfree(nn->conf_id_hashtbl); in nfs4_state_destroy_net()
8985 locks_start_grace(net, &nn->nfsd4_manager); in nfs4_state_start_net()
8987 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) in nfs4_state_start_net()
8989 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", in nfs4_state_start_net()
8990 nn->nfsd4_grace, net->ns.inum); in nfs4_state_start_net()
8992 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); in nfs4_state_start_net()
8997 net->ns.inum); in nfs4_state_start_net()
8998 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); in nfs4_state_start_net()
9013 nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot"); in nfs4_state_start()
9016 return -ENOMEM; in nfs4_state_start()
9018 nfsd_slot_shrinker->count_objects = nfsd_slot_count; in nfs4_state_start()
9019 nfsd_slot_shrinker->scan_objects = nfsd_slot_scan; in nfs4_state_start()
9033 shrinker_free(nn->nfsd_client_shrinker); in nfs4_state_shutdown_net()
9034 cancel_work_sync(&nn->nfsd_shrinker_work); in nfs4_state_shutdown_net()
9035 cancel_delayed_work_sync(&nn->laundromat_work); in nfs4_state_shutdown_net()
9036 locks_end_grace(&nn->nfsd4_manager); in nfs4_state_shutdown_net()
9040 list_for_each_safe(pos, next, &nn->del_recall_lru) { in nfs4_state_shutdown_net()
9043 list_add(&dp->dl_recall_lru, &reaplist); in nfs4_state_shutdown_net()
9048 list_del_init(&dp->dl_recall_lru); in nfs4_state_shutdown_net()
9071 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); in get_stateid()
9077 if (cstate->minorversion) { in put_stateid()
9078 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); in put_stateid()
9096 put_stateid(cstate, &u->open_downgrade.od_stateid); in nfsd4_set_opendowngradestateid()
9103 put_stateid(cstate, &u->open.op_stateid); in nfsd4_set_openstateid()
9110 put_stateid(cstate, &u->close.cl_stateid); in nfsd4_set_closestateid()
9117 put_stateid(cstate, &u->lock.lk_resp_stateid); in nfsd4_set_lockstateid()
9128 get_stateid(cstate, &u->open_downgrade.od_stateid); in nfsd4_get_opendowngradestateid()
9135 get_stateid(cstate, &u->delegreturn.dr_stateid); in nfsd4_get_delegreturnstateid()
9142 get_stateid(cstate, &u->free_stateid.fr_stateid); in nfsd4_get_freestateid()
9149 get_stateid(cstate, &u->setattr.sa_stateid); in nfsd4_get_setattrstateid()
9156 get_stateid(cstate, &u->close.cl_stateid); in nfsd4_get_closestateid()
9163 get_stateid(cstate, &u->locku.lu_stateid); in nfsd4_get_lockustateid()
9170 get_stateid(cstate, &u->read.rd_stateid); in nfsd4_get_readstateid()
9177 get_stateid(cstate, &u->write.wr_stateid); in nfsd4_get_writestateid()
9181 * nfsd4_vet_deleg_time - vet and set the timespec for a delegated timestamp update
9216 struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr; in cb_getattr_update_times()
9220 if (deleg_attrs_deleg(dp->dl_type)) { in cb_getattr_update_times()
9223 attrs.ia_atime = ncf->ncf_cb_atime; in cb_getattr_update_times()
9224 attrs.ia_mtime = ncf->ncf_cb_mtime; in cb_getattr_update_times()
9226 if (nfsd4_vet_deleg_time(&attrs.ia_atime, &dp->dl_atime, &now)) in cb_getattr_update_times()
9229 if (nfsd4_vet_deleg_time(&attrs.ia_mtime, &dp->dl_mtime, &now)) { in cb_getattr_update_times()
9232 if (nfsd4_vet_deleg_time(&attrs.ia_ctime, &dp->dl_ctime, &now)) in cb_getattr_update_times()
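
cb_getattr_update_times() folds the timestamps reported by the holder of a delegated-timestamps delegation back into the inode, vetting each one with nfsd4_vet_deleg_time() against the value recorded at grant time and against "now". The sketch below is one plausible reading of that vetting rule (accept only timestamps that move forward, and clamp anything in the server's future); it is offered as an illustration of the call sites above, not the authoritative policy:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* simplified reading of the vetting rule: take the client-reported
 * timestamp only if it is newer than the recorded one, and clamp it so it
 * never lies in the server's future */
static bool vet_deleg_time(struct timespec *req, const struct timespec *orig,
                           const struct timespec *now)
{
        if (req->tv_sec < orig->tv_sec ||
            (req->tv_sec == orig->tv_sec && req->tv_nsec <= orig->tv_nsec))
                return false;                   /* nothing newer to apply */
        if (req->tv_sec > now->tv_sec ||
            (req->tv_sec == now->tv_sec && req->tv_nsec > now->tv_nsec))
                *req = *now;                    /* clamp future stamps */
        return true;
}

int main(void)
{
        struct timespec orig = { 100, 0 }, now = { 200, 0 };
        struct timespec ok = { 150, 0 }, future = { 999, 0 };
        bool applied;

        applied = vet_deleg_time(&ok, &orig, &now);
        printf("%d (sec=%ld)\n", applied, (long)ok.tv_sec);      /* 1 (150) */
        applied = vet_deleg_time(&future, &orig, &now);
        printf("%d (sec=%ld)\n", applied, (long)future.tv_sec);  /* 1 (200) */
        return 0;
}
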
9250 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
9263 * code is returned. If @pdp is set to a non-NULL value, then the
9284 spin_lock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
9285 for_each_file_lock(fl, &ctx->flc_lease) { in nfsd4_deleg_getattr_conflict()
9286 if (fl->c.flc_flags == FL_LAYOUT) in nfsd4_deleg_getattr_conflict()
9288 if (fl->c.flc_type == F_WRLCK) { in nfsd4_deleg_getattr_conflict()
9289 if (fl->fl_lmops == &nfsd_lease_mng_ops) in nfsd4_deleg_getattr_conflict()
9290 dp = fl->c.flc_owner; in nfsd4_deleg_getattr_conflict()
9297 dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { in nfsd4_deleg_getattr_conflict()
9298 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
9310 refcount_inc(&dp->dl_stid.sc_count); in nfsd4_deleg_getattr_conflict()
9311 ncf = &dp->dl_cb_fattr; in nfsd4_deleg_getattr_conflict()
9312 nfs4_cb_getattr(&dp->dl_cb_fattr); in nfsd4_deleg_getattr_conflict()
9313 spin_unlock(&ctx->flc_lock); in nfsd4_deleg_getattr_conflict()
9315 wait_on_bit_timeout(&ncf->ncf_getattr.cb_flags, NFSD4_CALLBACK_RUNNING, in nfsd4_deleg_getattr_conflict()
9317 if (ncf->ncf_cb_status) { in nfsd4_deleg_getattr_conflict()
9324 if (!ncf->ncf_file_modified && in nfsd4_deleg_getattr_conflict()
9325 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || in nfsd4_deleg_getattr_conflict()
9326 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) in nfsd4_deleg_getattr_conflict()
9327 ncf->ncf_file_modified = true; in nfsd4_deleg_getattr_conflict()
9328 if (ncf->ncf_file_modified) { in nfsd4_deleg_getattr_conflict()
9341 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; in nfsd4_deleg_getattr_conflict()
9347 nfs4_put_stid(&dp->dl_stid); in nfsd4_deleg_getattr_conflict()
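
nfsd4_deleg_getattr_conflict() implements the RFC 8881, section 18.7.4 rule: when another client's GETATTR for size or change conflicts with an outstanding write delegation, the server must either fetch current attributes from the holder via CB_GETATTR or recall the delegation before replying. The decision sketch below is a heavily simplified reading of the code above (the NFS4ERR_DELAY fallback when the callback fails is my interpretation, not a verbatim restatement):

#include <stdbool.h>
#include <stdio.h>

enum getattr_conflict_action {
        NO_CONFLICT,          /* no write delegation on the file */
        USE_CB_GETATTR,       /* ask the holder for current size/change */
        SELF_CONFLICT,        /* the requester holds the delegation itself */
        RECALL_AND_DELAY,     /* callback failed: recall, reply NFS4ERR_DELAY */
};

static enum getattr_conflict_action
deleg_getattr_conflict(bool has_write_deleg, bool requester_is_holder,
                       bool cb_getattr_ok)
{
        if (!has_write_deleg)
                return NO_CONFLICT;
        if (requester_is_holder)
                return SELF_CONFLICT;   /* no callback needed */
        return cb_getattr_ok ? USE_CB_GETATTR : RECALL_AND_DELAY;
}

int main(void)
{
        printf("%d\n", deleg_getattr_conflict(true, false, true));   /* 1 */
        printf("%d\n", deleg_getattr_conflict(true, false, false));  /* 3 */
        return 0;
}
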