xref: /linux/fs/afs/server_list.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS fileserver list management.
 *
 * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "internal.h"

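/*
 * Drop a reference on a server list.  When the last reference is put, the use
 * count on each listed server is dropped and the list itself is freed after
 * an RCU grace period.
 */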
void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
{
	int i;

	if (slist && refcount_dec_and_test(&slist->usage)) {
		for (i = 0; i < slist->nr_servers; i++)
			afs_unuse_server(net, slist->servers[i].server,
					 afs_server_trace_put_slist);
		kfree_rcu(slist, rcu);
	}
}

/*
 * Build a server list from a VLDB record.
 */
struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
					      struct key *key,
					      struct afs_vldb_entry *vldb)
{
	struct afs_server_list *slist;
	struct afs_server *server;
	unsigned int type_mask = 1 << volume->type;
	bool use_newrepsites = false;
	int ret = -ENOMEM, nr_servers = 0, newrep = 0, i, j, usable = 0;

	/* Work out if we're going to restrict to NEWREPSITE-marked servers or
	 * not.  If at least one site is marked as NEWREPSITE, then it's likely
	 * that "vos release" is busy updating RO sites.  We cut over from one
	 * to the other when >=50% of the sites have been updated.  Sites that
	 * are in the process of being updated are marked DONTUSE.
	 */
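	/* For example, with four usable RO sites of which two are already
	 * marked NEWREPSITE, newrep (2) is not less than usable / 2 (2), so
	 * we cut over to the newly updated sites; with only one site updated
	 * we would stick with the old ones.
	 */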
	for (i = 0; i < vldb->nr_servers; i++) {
		if (!(vldb->fs_mask[i] & type_mask))
			continue;
		nr_servers++;
		if (vldb->vlsf_flags[i] & AFS_VLSF_DONTUSE)
			continue;
		usable++;
		if (vldb->vlsf_flags[i] & AFS_VLSF_NEWREPSITE)
			newrep++;
	}

	slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
	if (!slist)
		goto error;

	if (newrep) {
		if (newrep < usable / 2) {
			slist->ro_replicating = AFS_RO_REPLICATING_USE_OLD;
		} else {
			slist->ro_replicating = AFS_RO_REPLICATING_USE_NEW;
			use_newrepsites = true;
		}
	}

	refcount_set(&slist->usage, 1);
	rwlock_init(&slist->lock);

	/* Make sure a record exists for each server in the list. */
	for (i = 0; i < vldb->nr_servers; i++) {
		unsigned long se_flags = 0;
		bool newrepsite = vldb->vlsf_flags[i] & AFS_VLSF_NEWREPSITE;

		if (!(vldb->fs_mask[i] & type_mask))
			continue;
		if (vldb->vlsf_flags[i] & AFS_VLSF_DONTUSE)
			__set_bit(AFS_SE_EXCLUDED, &se_flags);
		if (newrep && (newrepsite ^ use_newrepsites))
			__set_bit(AFS_SE_EXCLUDED, &se_flags);

		server = afs_lookup_server(volume->cell, key, &vldb->fs_server[i],
					   vldb->addr_version[i]);
		if (IS_ERR(server)) {
			ret = PTR_ERR(server);
			if (ret == -ENOENT ||
			    ret == -ENOMEDIUM)
				continue;
			goto error_2;
		}

		/* Insertion-sort by UUID */
		for (j = 0; j < slist->nr_servers; j++)
			if (memcmp(&slist->servers[j].server->uuid,
				   &server->uuid,
				   sizeof(server->uuid)) >= 0)
				break;
		if (j < slist->nr_servers) {
			if (slist->servers[j].server == server) {
				afs_put_server(volume->cell->net, server,
					       afs_server_trace_put_slist_isort);
				continue;
			}

			memmove(slist->servers + j + 1,
				slist->servers + j,
				(slist->nr_servers - j) * sizeof(struct afs_server_entry));
		}

		slist->servers[j].server = server;
		slist->servers[j].volume = volume;
		slist->servers[j].flags = se_flags;
		slist->servers[j].cb_expires_at = AFS_NO_CB_PROMISE;
		slist->nr_servers++;
	}

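	/* Fail if we didn't manage to add any servers at all. */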
	if (slist->nr_servers == 0) {
		ret = -EDESTADDRREQ;
		goto error_2;
	}

	return slist;

error_2:
	afs_put_serverlist(volume->cell->net, slist);
error:
	return ERR_PTR(ret);
}

/*
 * Work out whether the annotations on an old server list can simply be copied
 * across to its potential replacement.  Returns true if the lists differ in
 * size, membership, RO-replication state or exclusion markings, and false if
 * they are equivalent.
 */
bool afs_annotate_server_list(struct afs_server_list *new,
			      struct afs_server_list *old)
{
	unsigned long mask = 1UL << AFS_SE_EXCLUDED;
	int i;

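	/* Both lists are sorted by server UUID, so if they describe the same
	 * set of servers with the same exclusion markings, the entries will
	 * match up index by index.
	 */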
	if (old->nr_servers != new->nr_servers ||
	    old->ro_replicating != new->ro_replicating)
		goto changed;

	for (i = 0; i < old->nr_servers; i++) {
		if (old->servers[i].server != new->servers[i].server)
			goto changed;
		if ((old->servers[i].flags & mask) != (new->servers[i].flags & mask))
			goto changed;
	}
	return false;
changed:
	return true;
}

/*
 * Attach a volume to the servers it is going to use.
 */
void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist)
{
	struct afs_server_entry *se, *pe;
	struct afs_server *server;
	struct list_head *p;
	unsigned int i;

	down_write(&volume->cell->vs_lock);

	for (i = 0; i < slist->nr_servers; i++) {
		se = &slist->servers[i];
		server = se->server;

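		/* Add this list's entry to the server's list of attached
		 * volumes, keeping that list sorted by volume ID.
		 */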
		list_for_each(p, &server->volumes) {
			pe = list_entry(p, struct afs_server_entry, slink);
			if (volume->vid <= pe->volume->vid)
				break;
		}
		list_add_tail(&se->slink, p);
	}

	slist->attached = true;
	up_write(&volume->cell->vs_lock);
}

/*
 * Reattach a volume to the servers it is going to use when the server list is
 * replaced.  We try to switch the attachment points to avoid rewalking the
 * lists.
 */
void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *new,
				    struct afs_server_list *old)
{
	unsigned int n = 0, o = 0;

	down_write(&volume->cell->vs_lock);

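	/* The old and new lists are both sorted by server UUID, so walk them
	 * in parallel: an entry for the same server is switched over in
	 * place, an entry only in the new list is attached, and an entry only
	 * in the old list is detached.
	 */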
	while (n < new->nr_servers || o < old->nr_servers) {
		struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL;
		struct afs_server_entry *po = o < old->nr_servers ? &old->servers[o] : NULL;
		struct afs_server_entry *s;
		struct list_head *p;
		int diff;

		if (pn && po && pn->server == po->server) {
			pn->cb_expires_at = po->cb_expires_at;
			list_replace(&po->slink, &pn->slink);
			n++;
			o++;
			continue;
		}

		if (pn && po)
			diff = memcmp(&pn->server->uuid, &po->server->uuid,
				      sizeof(pn->server->uuid));
		else
			diff = pn ? -1 : 1;

		if (diff < 0) {
			list_for_each(p, &pn->server->volumes) {
				s = list_entry(p, struct afs_server_entry, slink);
				if (volume->vid <= s->volume->vid)
					break;
			}
			list_add_tail(&pn->slink, p);
			n++;
		} else {
			list_del(&po->slink);
			o++;
		}
	}

	up_write(&volume->cell->vs_lock);
}

/*
 * Detach a volume from the servers it has been using.
 */
void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist)
{
	unsigned int i;

	if (!slist->attached)
		return;

	down_write(&volume->cell->vs_lock);

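	/* Remove each of this list's entries from its server's list of
	 * attached volumes.
	 */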
	for (i = 0; i < slist->nr_servers; i++)
		list_del(&slist->servers[i].slink);

	slist->attached = false;
	up_write(&volume->cell->vs_lock);
}
250