/linux/fs/afs/

server.c
    2   /* AFS server record management
    14  static unsigned afs_server_gc_delay = 10;  /* Server record timeout in seconds */
    22   * Find a server by one of its addresses.
    26  struct afs_server *server = (struct afs_server *)rxrpc_kernel_get_peer_data(peer);  in afs_find_server() local
    28  if (!server)  in afs_find_server()
    30  return afs_use_server(server, false, afs_server_trace_use_cm_call);  in afs_find_server()
    34   * Look up a server by its UUID and mark it active. The caller must hold
    39  struct afs_server *server;  in afs_find_server_by_uuid() local
    47  server = rb_entry(p, struct afs_server, uuid_rb);  in afs_find_server_by_uuid()
    49  diff = memcmp(uuid, &server->uuid, sizeof(*uuid));  in afs_find_server_by_uuid()
    [all …]

vl_probe.c
    18  static void afs_finished_vl_probe(struct afs_vlserver *server)  in afs_finished_vl_probe() argument
    20  if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {  in afs_finished_vl_probe()
    21  server->rtt = UINT_MAX;  in afs_finished_vl_probe()
    22  clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);  in afs_finished_vl_probe()
    25  clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);  in afs_finished_vl_probe()
    26  wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);  in afs_finished_vl_probe()
    32  static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up)  in afs_done_one_vl_probe() argument
    34  if (atomic_dec_and_test(&server->probe_outstanding)) {  in afs_done_one_vl_probe()
    35  afs_finished_vl_probe(server);  in afs_done_one_vl_probe()
    40  wake_up_all(&server->probe_wq);  in afs_done_one_vl_probe()
    [all …]

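The completion pattern in afs_done_one_vl_probe() is a common one: decrement an outstanding-probe counter, finalise the set when it reaches zero, then wake any waiters. Below is a minimal userspace sketch of the same idea using C11 atomics and a pthread condition variable instead of the kernel's wait-queue primitives; the names are illustrative and not taken from the AFS code.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct probe_set {
            atomic_int outstanding;         /* probes still in flight */
            bool finished;                  /* set once the last probe completes */
            pthread_mutex_t lock;
            pthread_cond_t wq;              /* stands in for the kernel wait queue */
    };

    /* Called once per completed probe, successful or not. */
    static void done_one_probe(struct probe_set *ps)
    {
            /* Only the caller that drops the count to zero finalises the set. */
            if (atomic_fetch_sub(&ps->outstanding, 1) == 1) {
                    pthread_mutex_lock(&ps->lock);
                    ps->finished = true;
                    pthread_cond_broadcast(&ps->wq);   /* wake_up_all() equivalent */
                    pthread_mutex_unlock(&ps->lock);
            }
    }

    /* Wait until every outstanding probe has reported back. */
    static void wait_for_probes(struct probe_set *ps)
    {
            pthread_mutex_lock(&ps->lock);
            while (!ps->finished)
                    pthread_cond_wait(&ps->wq, &ps->lock);
            pthread_mutex_unlock(&ps->lock);
    }
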
fs_probe.c
    56   * outstanding server count.
    59  struct afs_server *server, bool fast)  in afs_schedule_fs_probe() argument
    66  atj = server->probed_at;  in afs_schedule_fs_probe()
    77  static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server,  in afs_finished_fs_probe() argument
    84  list_add_tail(&server->probe_link, &net->fs_probe_slow);  in afs_finished_fs_probe()
    86  server->rtt = UINT_MAX;  in afs_finished_fs_probe()
    87  clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags);  in afs_finished_fs_probe()
    88  list_add_tail(&server->probe_link, &net->fs_probe_fast);  in afs_finished_fs_probe()
    93  afs_schedule_fs_probe(net, server, !responded);  in afs_finished_fs_probe()
    99  static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server,  in afs_done_one_fs_probe() argument
    [all …]

server_list.c
    18  afs_unuse_server(net, slist->servers[i].server,  in afs_put_serverlist()
    25   * Build a server list from a VLDB record.
    32  struct afs_server *server;  in afs_alloc_server_list() local
    70  /* Make sure a records exists for each server in the list. */  in afs_alloc_server_list()
    82  server = afs_lookup_server(volume->cell, key, &vldb->fs_server[i],  in afs_alloc_server_list()
    84  if (IS_ERR(server)) {  in afs_alloc_server_list()
    85  ret = PTR_ERR(server);  in afs_alloc_server_list()
    94  if (memcmp(&slist->servers[j].server->uuid,  in afs_alloc_server_list()
    95  &server->uuid,  in afs_alloc_server_list()
    96  sizeof(server->uuid)) >= 0)  in afs_alloc_server_list()
    [all …]

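The insertion test at lines 94-96 keeps the server list ordered by a raw memcmp() over the UUID bytes. A small standalone sketch of the same bytewise ordering, here applied with qsort() to an array of 16-byte UUIDs; the structure and helper names are illustrative, not the AFS types.

    #include <stdlib.h>
    #include <string.h>

    #define UUID_LEN 16

    struct server_rec {
            unsigned char uuid[UUID_LEN];
            /* ... other per-server fields ... */
    };

    /* Order records bytewise by UUID, exactly as memcmp() sees them. */
    static int cmp_by_uuid(const void *a, const void *b)
    {
            const struct server_rec *sa = a, *sb = b;

            return memcmp(sa->uuid, sb->uuid, UUID_LEN);
    }

    static void sort_servers(struct server_rec *recs, size_t nr)
    {
            qsort(recs, nr, sizeof(*recs), cmp_by_uuid);
    }
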
vl_list.c
    71   afs_put_vlserver(net, vllist->servers[i].server);  in afs_put_vlserverlist()
    88   * Build a VL server address list from a DNS queried server list.
    157  * Build a VL server list from a DNS queried server list.
    167  struct afs_vlserver *server;  in afs_extract_vlserver_list() local
    173  /* Check that it's a server list, v1 */  in afs_extract_vlserver_list()
    234  /* See if we can update an old server record */  in afs_extract_vlserver_list()
    235  server = NULL;  in afs_extract_vlserver_list()
    237  struct afs_vlserver *p = previous->servers[i].server;  in afs_extract_vlserver_list()
    242  server = afs_get_vlserver(p);  in afs_extract_vlserver_list()
    247  if (!server) {  in afs_extract_vlserver_list()
    [all …]

/linux/fs/smb/client/

transport.c
    45   alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)  in alloc_mid() argument
    49   if (server == NULL) {  in alloc_mid()
    64   temp->server = server;  in alloc_mid()
    85   __le16 command = midEntry->server->vals->lock_cmd;  in __release_mid()
    90   struct TCP_Server_Info *server = midEntry->server;  in __release_mid() local
    95   server->ops->handle_cancelled_mid)  in __release_mid()
    96   server->ops->handle_cancelled_mid(midEntry, server);  in __release_mid()
    111  if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {  in __release_mid()
    112  server->slowest_cmd[smb_cmd] = roundtrip_time;  in __release_mid()
    113  server->fastest_cmd[smb_cmd] = roundtrip_time;  in __release_mid()
    [all …]

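Lines 111-113 seed the per-command latency statistics: the first completion of a given command type initialises both the slowest and the fastest observed round-trip time. A hedged sketch of that bookkeeping in plain C follows; the field names are illustrative and are not the cifs structures.

    #include <stdint.h>

    struct cmd_stats {
            uint64_t num_cmds;      /* completions seen for this command type */
            uint64_t fastest_ns;    /* minimum observed round-trip time */
            uint64_t slowest_ns;    /* maximum observed round-trip time */
            uint64_t total_ns;      /* running sum, for a mean later */
    };

    static void record_roundtrip(struct cmd_stats *st, uint64_t rtt_ns)
    {
            if (st->num_cmds == 0) {
                    /* First sample seeds both extremes, as in __release_mid(). */
                    st->fastest_ns = rtt_ns;
                    st->slowest_ns = rtt_ns;
            } else {
                    if (rtt_ns < st->fastest_ns)
                            st->fastest_ns = rtt_ns;
                    if (rtt_ns > st->slowest_ns)
                            st->slowest_ns = rtt_ns;
            }
            st->total_ns += rtt_ns;
            st->num_cmds++;
    }
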
connect.c
    59  /* Drop the connection to not overload the server */
    62  static int ip_connect(struct TCP_Server_Info *server);
    63  static int generic_ip_connect(struct TCP_Server_Info *server);
    71   * This should be called with server->srv_mutex held.
    73  static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)  in reconn_set_ipaddr_from_hostname() argument
    78  if (!server->hostname)  in reconn_set_ipaddr_from_hostname()
    81  /* if server hostname isn't populated, there's nothing to do here */  in reconn_set_ipaddr_from_hostname()
    82  if (server->hostname[0] == '\0')  in reconn_set_ipaddr_from_hostname()
    85  spin_lock(&server->srv_lock);  in reconn_set_ipaddr_from_hostname()
    86  ss = server->dstaddr;  in reconn_set_ipaddr_from_hostname()
    [all …]

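reconn_set_ipaddr_from_hostname() re-resolves the stored server hostname so that a reconnect can follow an address change (in the kernel this goes through the DNS resolver upcall, not libc). As a rough userspace analogue of that step only, a hedged sketch using getaddrinfo(); it is not the cifs code path.

    #include <netdb.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Refresh *dst with the first address the resolver now returns for hostname.
     * Returns 0 on success, a getaddrinfo() error code otherwise. */
    static int refresh_server_addr(const char *hostname,
                                   struct sockaddr_storage *dst)
    {
            struct addrinfo hints = { .ai_socktype = SOCK_STREAM };
            struct addrinfo *res;
            int rc;

            if (!hostname || hostname[0] == '\0')
                    return 0;       /* nothing to do, as in the kernel helper */

            rc = getaddrinfo(hostname, NULL, &hints, &res);
            if (rc)
                    return rc;

            memcpy(dst, res->ai_addr, res->ai_addrlen);
            freeaddrinfo(res);
            return 0;
    }
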
cifs_debug.c
    38   void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)  in cifs_dump_detail() argument
    46   if (!server->ops->check_message(buf, server->total_read, server)) {  in cifs_dump_detail()
    48   server->ops->calc_smb_size(smb));  in cifs_dump_detail()
    53   void cifs_dump_mids(struct TCP_Server_Info *server)  in cifs_dump_mids() argument
    58   if (server == NULL)  in cifs_dump_mids()
    62   spin_lock(&server->mid_lock);  in cifs_dump_mids()
    63   list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {  in cifs_dump_mids()
    80   cifs_dump_detail(mid_entry->resp_buf, server);  in cifs_dump_mids()
    85   spin_unlock(&server->mid_lock);  in cifs_dump_mids()
    119  if (tcon->ses->server->ops->dump_share_caps)  in cifs_debug_tcon()
    [all …]

file.c
    44   * Prepare a subrequest to upload to the server. We need to allocate credits
    53   struct TCP_Server_Info *server;  in cifs_prepare_write() local
    63   server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);  in cifs_prepare_write()
    64   wdata->server = server;  in cifs_prepare_write()
    77   rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,  in cifs_prepare_write()
    90   server->credits, server->in_flight,  in cifs_prepare_write()
    95   if (server->smbd_conn)  in cifs_prepare_write()
    96   stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;  in cifs_prepare_write()
    101  * Issue a subrequest to upload to the server.
    115  rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);  in cifs_issue_write()
    [all …]

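cifs_prepare_write() requests credits for the desired write size (wsize) and derives the maximum subrequest length from what is actually granted. A hedged arithmetic sketch of that clamping; the credit unit and names are illustrative assumptions, not the SMB3 credit rules.

    #include <stddef.h>

    #define CREDIT_UNIT 65536       /* illustrative: bytes one credit is assumed to cover */

    /* Clamp a requested write size to what the granted credits allow. */
    static size_t clamp_to_credits(size_t requested, unsigned int granted_credits)
    {
            size_t allowed = (size_t)granted_credits * CREDIT_UNIT;

            return requested < allowed ? requested : allowed;
    }
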
/linux/fs/nfsd/

Kconfig
    3   tristate "NFS server support"
    18  protocol. To compile the NFS server support as a module,
    21  You may choose to use a user-space NFS server instead, in which
    27  the Linux NFS server implementation is available via the
    31  available to clients mounting the NFS server on this system.
    38  bool "NFS server support for NFS version 2 (DEPRECATED)"
    49  bool "NFS server support for the NFSv2 ACL protocol extension"
    53  bool "NFS server support for the NFSv3 ACL protocol extension"
    63  This option enables support in your system's NFS server for the
    65  POSIX ACLs on files exported by your system's NFS server. NFS
    [all …]

/linux/Documentation/block/

ublk.rst
    30   in this document, ``ublk server`` refers to generic ublk userspace
    43   assigned by one queue wide unique tag. ublk server assigns unique tag to each
    51   done by io_uring, but also the preferred IO handling in ublk server is io_uring
    64   ublk requires userspace ublk server to handle real block device logic.
    102  Add a ublk char device (``/dev/ublkc*``) which is talked with ublk server
    106  for which the info is negotiated with the driver and sent back to the server.
    118  After the server prepares userspace resources (such as creating per-queue
    126  ublk server will release resources (such as destroying per-queue pthread &
    137  that each queue's affinity info is available. The server sends
    144  For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's
    [all …]

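The excerpt notes that every in-flight IO on a ublk queue is identified by a queue-wide unique tag, so the server can find its per-IO state from the tag alone when a completion arrives. A minimal sketch of tag-indexed per-queue bookkeeping a ublk server might keep; the structures are illustrative assumptions and are not the ublk UAPI.

    #include <stddef.h>
    #include <stdint.h>

    struct io_slot {
            uint16_t tag;           /* queue-wide unique tag */
            int in_flight;          /* non-zero while the request is being served */
            void *buf;              /* data buffer for this IO */
            int32_t result;         /* bytes transferred, or negative errno */
    };

    struct queue_state {
            unsigned int queue_depth;
            struct io_slot *slots;  /* queue_depth entries, indexed by tag */
    };

    /* Look up the per-IO state for a completion identified only by its tag. */
    static struct io_slot *slot_for_tag(struct queue_state *q, uint16_t tag)
    {
            return tag < q->queue_depth ? &q->slots[tag] : NULL;
    }
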
/linux/arch/powerpc/sysdev/xics/

ics-opal.c
    29  static int ics_opal_mangle_server(int server)  in ics_opal_mangle_server() argument
    32  return server << 2;  in ics_opal_mangle_server()
    35  static int ics_opal_unmangle_server(int server)  in ics_opal_unmangle_server() argument
    38  return server >> 2;  in ics_opal_unmangle_server()
    45  int server;  in ics_opal_unmask_irq() local
    52  server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);  in ics_opal_unmask_irq()
    53  server = ics_opal_mangle_server(server);  in ics_opal_unmask_irq()
    55  rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY);  in ics_opal_unmask_irq()
    57  pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"  in ics_opal_unmask_irq()
    59  __func__, d->irq, hw_irq, server, rc);  in ics_opal_unmask_irq()
    [all …]

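The mangle/unmangle helpers simply shift the Linux interrupt-server number left or right by two bits to match the numbering OPAL expects, so the conversion round-trips exactly. A standalone check of that round trip:

    #include <assert.h>

    /* Convert a Linux server number to the shifted form and back. */
    static int mangle_server(int server)   { return server << 2; }
    static int unmangle_server(int server) { return server >> 2; }

    int main(void)
    {
            for (int server = 0; server < 64; server++)
                    assert(unmangle_server(mangle_server(server)) == server);
            return 0;
    }
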
/linux/net/netfilter/ipvs/

Kconfig
    3   # IP Virtual Server configuration
    6   tristate "IP virtual server support"
    10  IP Virtual Server support will let you build a high-performance
    11  virtual server based on cluster of two or more real servers. This
    17  virtual server via NAT, virtual server via tunneling and virtual
    18  server via direct routing. The several scheduling algorithms can
    19  be used to choose which server the connection is directed to,
    39  bool "IP virtual server debugging"
    42  debugging the IP virtual server code. You can change the debug
    61  table size yourself, according to your virtual server application.
    [all …]

/linux/fs/nfs/

delegation.c
    78   static void nfs_mark_return_delegation(struct nfs_server *server,  in nfs_mark_return_delegation() argument
    82   set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);  in nfs_mark_return_delegation()
    83   set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);  in nfs_mark_return_delegation()
    335  struct nfs_server *server, int err)  in nfs_abort_delegation_return() argument
    342  &server->delegation_flags);  in nfs_abort_delegation_return()
    344  &server->nfs_client->cl_state);  in nfs_abort_delegation_return()
    375  struct nfs_server *server)  in nfs_detach_delegation() argument
    377  struct nfs_client *clp = server->nfs_client;  in nfs_detach_delegation()
    389  struct nfs_server *server = NFS_SERVER(inode);  in nfs_inode_detach_delegation() local
    395  delegation = nfs_detach_delegation(nfsi, delegation, server);  in nfs_inode_detach_delegation()
    [all …]

nfs4proc.c
    96   static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
    153  static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)  in nfs4_bitmask() argument
    156  return server->attr_bitmask;  in nfs4_bitmask()
    158  return server->attr_bitmask_nl;  in nfs4_bitmask()
    169  nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)  in nfs4_bitmask() argument
    170  { return server->attr_bitmask; }  in nfs4_bitmask()
    360  * when talking to the server, we always send cookie 0  in nfs4_setup_readdir()
    404  static void nfs4_test_and_free_stateid(struct nfs_server *server,  in nfs4_test_and_free_stateid() argument
    408  const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;  in nfs4_test_and_free_stateid()
    410  ops->test_and_free_expired(server, stateid, cred);  in nfs4_test_and_free_stateid()
    [all …]

/linux/drivers/infiniband/ulp/rtrs/

README
    7   between client and server machines using RDMA (InfiniBand, RoCE, iWarp)
    27  An established connection between a client and a server is called rtrs
    29  server side for a given client for rdma transfer. A session
    31  between client and server. Those are used for load balancing and failover.
    36  chunks reserved for him on the server side. Their number, size and addresses
    37  need to be exchanged between client and server during the connection
    39  inform the server about the session name and identify each path and connection
    42  On an established session client sends to server write or read messages.
    43  Server uses immediate field to tell the client which request is being
    44  acknowledged and for errno. Client uses immediate field to tell the server
    [all …]

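The README says the 32-bit RDMA immediate value carries both which request is being acknowledged and an errno. The real RTRS wire encoding is not shown in this excerpt; the sketch below is a hypothetical packing that only illustrates the idea of multiplexing an index and a status code into one immediate.

    #include <stdint.h>

    /* Hypothetical layout: low 16 bits = request index, high 16 bits = -errno. */
    static uint32_t pack_imm(uint16_t req_idx, int err)
    {
            return ((uint32_t)(uint16_t)(-err) << 16) | req_idx;
    }

    static void unpack_imm(uint32_t imm, uint16_t *req_idx, int *err)
    {
            *req_idx = imm & 0xffff;
            *err = -(int)(imm >> 16);
    }
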
/linux/Documentation/admin-guide/cifs/

usage.rst
    66   and maximum number of simultaneous requests to one server can be configured.
    100  //server/usersharename /mnt/username cifs user 0 0
    132  to the file /etc/fstab for each //server/share you wish to mount, but
    142  (CIFS/SMB1), we recommend using a server that supports the SNIA CIFS
    146  not have a server that supports the Unix extensions for CIFS (such as Samba
    147  2.2.5 or later). To enable the Unix CIFS Extensions in the Samba server, add
    152  to your smb.conf file on the server. Note that the following smb.conf settings
    153  are also useful (on the Samba server) when the majority of clients are Unix or
    160  Note that server ea support is required for supporting xattrs from the Linux
    178  enabled on the server and client, subsequent setattr calls (e.g. chmod) can
    [all …]

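Line 160 points out that extended-attribute (EA) support on the server is needed before a Linux client can use xattrs on a CIFS mount. A hedged example of exercising that from userspace with the standard xattr syscalls; the mount point and attribute value are made up for illustration.

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
            const char *path = "/mnt/cifs/testfile";   /* hypothetical CIFS mount */
            const char *value = "example";
            char buf[64];
            ssize_t len;

            if (setxattr(path, "user.comment", value, strlen(value), 0) != 0) {
                    perror("setxattr");   /* fails if the server lacks EA support */
                    return 1;
            }

            len = getxattr(path, "user.comment", buf, sizeof(buf) - 1);
            if (len < 0) {
                    perror("getxattr");
                    return 1;
            }
            buf[len] = '\0';
            printf("user.comment = %s\n", buf);
            return 0;
    }
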
/linux/tools/testing/selftests/net/mptcp/

userspace_pm.sh
    176  # the MPTCP client and server
    207  # Run the server
    213  # Run the client, transfer $file and stay connected to the server
    221  # Capture client/server attributes from MPTCP connection netlink events
    242  …test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serversi…
    309  # Capture events on the network namespace running the server
    327  # ADD_ADDR from the client to server machine reusing the subflow port
    337  # ADD_ADDR6 from the client to server machine reusing the subflow port
    346  # ADD_ADDR from the client to server machine using a new port
    359  # ADD_ADDR from the server to client machine reusing the subflow port
    [all …]

/linux/Documentation/filesystems/nfs/

client-identifier.rst
    25  Simply put, an NFSv4 server creates a lease for each NFSv4 client.
    26  The server collects each client's file open and lock state under
    30  While a lease remains valid, the server holding that lease
    34  the NFSv4 protocol allows the server to remove the client's open
    40  In addition, each NFSv4 server manages a persistent list of client
    41  leases. When the server restarts and clients attempt to recover
    42  their state, the server uses this list to distinguish amongst
    43  clients that held state before the server restarted and clients
    45  persist safely across server restarts.
    57  server to distinguish successive boot epochs of the same client.
    [all …]

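The passage describes why the server needs an identifier that is unique among clients and stable for a given client, so that lease state can be matched up across server restarts and boot epochs can be told apart. A sketch of composing such a uniquifier string from the hostname plus a value kept in a persistent file; it illustrates the uniqueness and persistence requirements only and is not the format the Linux NFS client actually sends (the file path is hypothetical).

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Build "hostname/uniquifier" into buf; the uniquifier is read from a file
     * that is expected to survive reboots (path is hypothetical). */
    static int build_client_id(char *buf, size_t len)
    {
            char host[256] = "", uniq[64] = "";
            FILE *f;

            if (gethostname(host, sizeof(host) - 1) != 0)
                    return -1;

            f = fopen("/etc/nfs4_uniquifier", "r");   /* hypothetical location */
            if (!f)
                    return -1;
            if (!fgets(uniq, sizeof(uniq), f)) {
                    fclose(f);
                    return -1;
            }
            fclose(f);
            uniq[strcspn(uniq, "\n")] = '\0';

            return snprintf(buf, len, "%s/%s", host, uniq) < (int)len ? 0 : -1;
    }
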
/linux/drivers/block/rnbd/

README
    9   (client and server) that allow for remote access of a block device on
    10  the server over RTRS protocol using the RDMA (InfiniBand, RoCE, iWARP)
    14  I/O is transferred between client and server by the RTRS transport
    26  Server side:
    35  on client and on server sides; "path=" is a destination IP address or
    39  mapped from the server side. After the session to the server machine is
    44  RNBD-Server Module Parameters
    50  When a device is mapped from the client, the server generates the path
    51  to the block device on the server side by concatenating dev_search_path
    64  1. Before mapping first device from a given server, client sends an
    [all …]

/linux/tools/testing/selftests/net/tcp_ao/

unsigned-md5.c
    160  try_accept("[server] AO server (INADDR_ANY): AO client", port++, NULL, 0,  in server_fn()
    163  try_accept("[server] AO server (INADDR_ANY): MD5 client", port++, NULL, 0,  in server_fn()
    166  try_accept("[server] AO server (INADDR_ANY): no sign client", port++, NULL, 0,  in server_fn()
    169  try_accept("[server] AO server (AO_REQUIRED): AO client", port++, NULL, 0,  in server_fn()
    172  try_accept("[server] AO server (AO_REQUIRED): unsigned client", port++, NULL, 0,  in server_fn()
    177  try_accept("[server] MD5 server (INADDR_ANY): AO client", port++, &addr_any, 0,  in server_fn()
    180  try_accept("[server] MD5 server (INADDR_ANY): MD5 client", port++, &addr_any, 0,  in server_fn()
    182  try_accept("[server] MD5 server (INADDR_ANY): no sign client", port++, &addr_any,  in server_fn()
    186  try_accept("[server] no sign server: AO client", port++, NULL, 0,  in server_fn()
    189  try_accept("[server] no sign server: MD5 client", port++, NULL, 0,  in server_fn()
    [all …]

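These tests pit TCP-AO, legacy TCP-MD5 and unsigned peers against one another. For reference, a hedged sketch of how a socket is given a legacy MD5 signing key with the long-standing TCP_MD5SIG socket option (TCP-AO itself is configured through separate, newer socket options not shown here); the peer address and key below are placeholders.

    #include <arpa/inet.h>
    #include <linux/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Attach an MD5 signing key for a specific peer address to a TCP socket. */
    static int set_md5_key(int sk, const char *peer_ip, const char *key)
    {
            struct tcp_md5sig md5 = {};
            struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

            if (strlen(key) > TCP_MD5SIG_MAXKEYLEN)
                    return -1;

            sin->sin_family = AF_INET;
            if (inet_pton(AF_INET, peer_ip, &sin->sin_addr) != 1)
                    return -1;

            md5.tcpm_keylen = strlen(key);
            memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

            return setsockopt(sk, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
    }
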
/linux/Documentation/arch/powerpc/

hvcs.rst
    2    HVCS IBM "Hypervisor Virtual Console Server" Installation Guide
    36   This is the device driver for the IBM Hypervisor Virtual Console Server,
    55   major and minor numbers are associated with each vty-server. Directions
    79   <*> IBM Hypervisor Virtual Console Server Support
    91   <M> IBM Hypervisor Virtual Console Server Support
    114  We think that 1024 is currently a decent maximum number of server adapters
    206  is a vty-server device configured for that node.
    249  Hotplug add and remove of vty-server adapters affects which /dev/hvcs* node
    250  is used to connect to each vty-server adapter. In order to determine which
    251  vty-server adapter is associated with which /dev/hvcs* node a special sysfs
    [all …]

/linux/tools/testing/selftests/net/

fcnal-test.sh
    9     # 2. client, server, no-server
    38    # server / client nomenclature relative to ns-A
    882   # client sends MD5, server not configured
    888   log_test $? 2 "MD5: Server no config, client uses password"
    950   # client sends MD5, server not configured
    952   show_hint "Should timeout since server does not have MD5 auth"
    956   log_test $? 2 "MD5: VRF: Server no config, client uses password"
    968   show_hint "Should timeout since server config differs from client"
    1087  log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
    1094  log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
    [all …]

/linux/tools/usb/usbip/

README
    8   USB/IP protocol allows to pass USB device from server to client over the
    9   network. Server is a machine which provides (shares) a USB device. Client is
    10  a machine which uses USB device provided by server over the network.
    11  The USB device may be either physical device connected to a server or
    12  software entity created on a server using USB gadget subsystem.
    20  A server side module which provides a USB device driver which can be
    24  A server side module which provides a virtual USB Device Controller and allows
    62  On a server side there are two entities which can be shared.
    66  server:# (Physically attach your USB device.)
    68  server:# insmod usbip-core.ko
    [all …]

/linux/Documentation/admin-guide/nfs/

nfs-rdma.rst
    15   and server software.
    17   The NFS/RDMA client was first included in Linux 2.6.24. The NFS/RDMA server
    46   The first kernel release to contain both the NFS/RDMA client and server was
    102  nfs-utils on the server. Furthermore, only the mount.nfs command from
    107  The NFS/RDMA client and server are both included in the mainline Linux
    125  - Configure the NFS client and server
    128  NFS server support enabled. These and other NFS related configuration
    134  are turned on. The NFS/RDMA client and server are configured via the hidden
    139  and server will not be built
    142  in this case the NFS/RDMA client and server will be built as modules
    [all …]