/*-
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _NLM_NLM_H_
#define _NLM_NLM_H_

#ifdef _KERNEL

#ifdef _SYS_MALLOC_H_
MALLOC_DECLARE(M_NLM);
#endif

/*
 * This value is added to host system IDs when recording NFS client
 * locks in the local lock manager.
 */
#define NLM_SYSID_CLIENT	0x1000000

struct nlm_host;
struct vnode;

extern struct timeval nlm_zero_tv;
extern int nlm_nsm_state;

/*
 * Copy a struct netobj.
 */
extern void nlm_copy_netobj(struct netobj *dst, struct netobj *src,
    struct malloc_type *type);

/*
 * Search for an existing NLM host that matches the given name
 * (typically the caller_name element of an nlm4_lock). If none is
 * found, create a new host. If 'addr' is non-NULL, record the remote
 * address of the host so that we can call it back for async
 * responses. If 'vers' is greater than zero then record the NLM
 * program version to use to communicate with this client. The host
 * reference count is incremented - the caller must call
 * nlm_host_release when it has finished using it.
 */
extern struct nlm_host *nlm_find_host_by_name(const char *name,
    const struct sockaddr *addr, rpcvers_t vers);

/*
 * Search for an existing NLM host that matches the given remote
 * address. If none is found, create a new host with the requested
 * address and remember 'vers' as the NLM protocol version to use for
 * that host. The host reference count is incremented - the caller
 * must call nlm_host_release when it has finished using it.
 */
extern struct nlm_host *nlm_find_host_by_addr(const struct sockaddr *addr,
    int vers);

/*
 * Register this NLM host with the local NSM so that we can be
 * notified if it reboots.
 */
extern void nlm_host_monitor(struct nlm_host *host, int state);

/*
 * Decrement the host reference count, freeing resources if the
 * reference count reaches zero.
 */
extern void nlm_host_release(struct nlm_host *host);

/*
 * Return an RPC client handle that can be used to talk to the NLM
 * running on the given host.
 */
extern CLIENT *nlm_host_get_rpc(struct nlm_host *host, bool_t isserver);

/*
 * Return the system ID for a host.
 */
extern int nlm_host_get_sysid(struct nlm_host *host);

/*
 * Return the remote NSM state value for a host.
 */
extern int nlm_host_get_state(struct nlm_host *host);
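
/*
 * Illustrative sketch (not part of this interface): both lookup
 * functions above return a host with its reference count bumped, so
 * every successful lookup should eventually be paired with a call to
 * nlm_host_release(). The identifiers 'caller_name', 'saddr' and
 * 'vers' below are hypothetical values supplied by the caller.
 *
 *	struct nlm_host *host;
 *	int sysid;
 *
 *	host = nlm_find_host_by_name(caller_name, saddr, vers);
 *	sysid = nlm_host_get_sysid(host);
 *
 *	(use 'sysid' when recording this host's locks with the local
 *	 lock manager; as noted above, NLM_SYSID_CLIENT is added to it
 *	 for locks taken on behalf of the NFS client)
 *
 *	nlm_host_release(host);
 */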

/*
 * When sending a blocking lock request, we need to track the request
 * in our waiting lock list. We add an entry to the waiting list
 * before we send the lock RPC so that we can cope with a granted
 * message arriving at any time. Call this function before sending the
 * lock RPC. If the lock succeeds, call nlm_deregister_wait_lock with
 * the handle this function returns; otherwise call nlm_wait_lock.
 * Both will remove the entry from the waiting list.
 */
extern void *nlm_register_wait_lock(struct nlm4_lock *lock, struct vnode *vp);

/*
 * Deregister a blocking lock request. Call this if the lock succeeded
 * without blocking.
 */
extern void nlm_deregister_wait_lock(void *handle);

/*
 * Wait for a granted callback for a blocked lock request, waiting at
 * most timo ticks. If no granted message is received within the
 * timeout, return EWOULDBLOCK. If a signal interrupts the wait,
 * return EINTR - the caller must arrange to send a cancellation to
 * the server. In both cases, the request is removed from the waiting
 * list.
 */
extern int nlm_wait_lock(void *handle, int timo);

/*
 * Cancel any pending waits for this vnode - called on forcible unmounts.
 */
extern void nlm_cancel_wait(struct vnode *vp);
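
/*
 * Illustrative sketch (not part of this interface) of the waiting
 * list protocol described above when issuing a blocking lock request.
 * 'lock', 'vp', 'timeout_ticks' and the RPC calls in parentheses are
 * hypothetical; error handling is omitted.
 *
 *	void *wait_handle;
 *	int error;
 *
 *	wait_handle = nlm_register_wait_lock(lock, vp);
 *	stat = (send the NLM lock RPC to the server);
 *	if (stat is "lock granted") {
 *		nlm_deregister_wait_lock(wait_handle);
 *	} else if (stat is "blocked") {
 *		error = nlm_wait_lock(wait_handle, timeout_ticks);
 *		if (error == EINTR)
 *			(send an NLM cancel RPC to the server);
 *	}
 */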

/*
 * Called when a host restarts.
 */
extern void nlm_sm_notify(nlm_sm_status *argp);

/*
 * Implementation for lock testing RPCs. If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply. Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_test(nlm4_testargs *argp, nlm4_testres *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for lock setting RPCs. If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply. Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_lock(nlm4_lockargs *argp, nlm4_res *result,
    struct svc_req *rqstp, bool_t monitor, CLIENT **rpcp);

/*
 * Implementation for cancelling a pending lock request. If the
 * request was handled successfully and rpcp is non-NULL, *rpcp is set
 * to an RPC client handle which can be used to send an async rpc
 * reply. Returns zero if the request was handled, or a suitable unix
 * error code otherwise.
 */
extern int nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for unlocking RPCs. If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply. Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Implementation for granted RPCs. If the request was handled
 * successfully and rpcp is non-NULL, *rpcp is set to an RPC client
 * handle which can be used to send an async rpc reply. Returns zero
 * if the request was handled, or a suitable unix error code
 * otherwise.
 */
extern int nlm_do_granted(nlm4_testargs *argp, nlm4_res *result,
    struct svc_req *rqstp, CLIENT **rpcp);

/*
 * Free all locks associated with the hostname argp->name.
 */
extern void nlm_do_free_all(nlm4_notify *argp);

/*
 * Recover client lock state after a server reboot.
 */
extern void nlm_client_recovery(struct nlm_host *);

/*
 * Interface from NFS client code to the NLM.
 */
struct vop_advlock_args;
struct vop_reclaim_args;
extern int nlm_advlock(struct vop_advlock_args *ap);
extern int nlm_reclaim(struct vop_reclaim_args *ap);

#endif

#endif
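
/*
 * Illustrative sketch (not part of this interface): a server-side
 * service routine for the asynchronous ("message") form of the NLM
 * lock procedure might call nlm_do_lock(), declared above, and use
 * the RPC client handle it returns to send the async reply. 'argp'
 * and 'rqstp' are assumed to be the service routine's arguments, the
 * reply step in parentheses is a placeholder, and passing TRUE for
 * 'monitor' is only for illustration.
 *
 *	nlm4_res res;
 *	CLIENT *rpc = NULL;
 *	int error;
 *
 *	memset(&res, 0, sizeof(res));
 *	error = nlm_do_lock(argp, &res, rqstp, TRUE, &rpc);
 *	if (error == 0 && rpc != NULL) {
 *		(send 'res' back to the client over 'rpc' as the
 *		 asynchronous lock result, then release the handle)
 *	}
 */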