1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * DFS referral cache routines
4 *
5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
6 */
7
8 #include <linux/jhash.h>
9 #include <linux/ktime.h>
10 #include <linux/slab.h>
11 #include <linux/proc_fs.h>
12 #include <linux/nls.h>
13 #include <linux/workqueue.h>
14 #include <linux/uuid.h>
15 #include "cifsglob.h"
16 #include "smb2pdu.h"
17 #include "smb2proto.h"
18 #include "cifsproto.h"
19 #include "cifs_debug.h"
20 #include "cifs_unicode.h"
21 #include "smb2glob.h"
22 #include "dns_resolve.h"
23 #include "dfs.h"
24
25 #include "dfs_cache.h"
26
27 #define CACHE_HTABLE_SIZE 32
28 #define CACHE_MAX_ENTRIES 64
29 #define CACHE_MIN_TTL 120 /* 2 minutes */
30 #define CACHE_DEFAULT_TTL 300 /* 5 minutes */
31
32 struct cache_dfs_tgt {
33 char *name;
34 int path_consumed;
35 struct list_head list;
36 };
37
38 struct cache_entry {
39 struct hlist_node hlist;
40 const char *path;
41 int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
42 int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
43 int srvtype; /* DFS_REREFERRAL_V3.ServerType */
44 int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
45 struct timespec64 etime;
46 int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
47 int numtgts;
48 struct list_head tlist;
49 struct cache_dfs_tgt *tgthint;
50 };
51
52 static struct kmem_cache *cache_slab __read_mostly;
53 struct workqueue_struct *dfscache_wq;
54
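/* Smallest referral TTL seen so far; used (in seconds) as the delay between refresh worker runs */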
55 atomic_t dfs_cache_ttl;
56
57 static struct nls_table *cache_cp;
58
59 /*
60 * Number of entries in the cache
61 */
62 static atomic_t cache_count;
63
64 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
65 static DECLARE_RWSEM(htable_rw_lock);
66
67 /**
68 * dfs_cache_canonical_path - get a canonical DFS path
69 *
70 * @path: DFS path
71 * @cp: codepage
72 * @remap: mapping type
73 *
74 * Return the canonical path on success, otherwise an ERR_PTR on error.
75 */
76 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
77 {
78 char *tmp;
79 int plen = 0;
80 char *npath;
81
82 if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
83 return ERR_PTR(-EINVAL);
84
85 if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
86 tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
87 if (!tmp) {
88 cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
89 return ERR_PTR(-EINVAL);
90 }
91
92 npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
93 kfree(tmp);
94
95 if (!npath) {
96 cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
97 return ERR_PTR(-EINVAL);
98 }
99 } else {
100 npath = kstrdup(path, GFP_KERNEL);
101 if (!npath)
102 return ERR_PTR(-ENOMEM);
103 }
104 convert_delimiter(npath, '\\');
105 return npath;
106 }
107
108 static inline bool cache_entry_expired(const struct cache_entry *ce)
109 {
110 struct timespec64 ts;
111
112 ktime_get_coarse_real_ts64(&ts);
113 return timespec64_compare(&ts, &ce->etime) >= 0;
114 }
115
116 static inline void free_tgts(struct cache_entry *ce)
117 {
118 struct cache_dfs_tgt *t, *n;
119
120 list_for_each_entry_safe(t, n, &ce->tlist, list) {
121 list_del(&t->list);
122 kfree(t->name);
123 kfree(t);
124 }
125 }
126
127 static inline void flush_cache_ent(struct cache_entry *ce)
128 {
129 cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
130 hlist_del_init(&ce->hlist);
131 kfree(ce->path);
132 free_tgts(ce);
133 atomic_dec(&cache_count);
134 kmem_cache_free(cache_slab, ce);
135 }
136
137 static void flush_cache_ents(void)
138 {
139 int i;
140
141 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
142 struct hlist_head *l = &cache_htable[i];
143 struct hlist_node *n;
144 struct cache_entry *ce;
145
146 hlist_for_each_entry_safe(ce, n, l, hlist) {
147 if (!hlist_unhashed(&ce->hlist))
148 flush_cache_ent(ce);
149 }
150 }
151 }
152
153 /*
154 * dfs cache /proc file
155 */
156 static int dfscache_proc_show(struct seq_file *m, void *v)
157 {
158 int i;
159 struct cache_entry *ce;
160 struct cache_dfs_tgt *t;
161
162 seq_puts(m, "DFS cache\n---------\n");
163
164 down_read(&htable_rw_lock);
165 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
166 struct hlist_head *l = &cache_htable[i];
167
168 hlist_for_each_entry(ce, l, hlist) {
169 if (hlist_unhashed(&ce->hlist))
170 continue;
171
172 seq_printf(m,
173 "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
174 ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
175 ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
176 DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
177 ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
178
179 list_for_each_entry(t, &ce->tlist, list) {
180 seq_printf(m, " %s%s\n",
181 t->name,
182 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
183 }
184 }
185 }
186 up_read(&htable_rw_lock);
187
188 return 0;
189 }
190
191 static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
192 size_t count, loff_t *ppos)
193 {
194 char c;
195 int rc;
196
197 rc = get_user(c, buffer);
198 if (rc)
199 return rc;
200
201 if (c != '0')
202 return -EINVAL;
203
204 cifs_dbg(FYI, "clearing dfs cache\n");
205
206 down_write(&htable_rw_lock);
207 flush_cache_ents();
208 up_write(&htable_rw_lock);
209
210 return count;
211 }
212
213 static int dfscache_proc_open(struct inode *inode, struct file *file)
214 {
215 return single_open(file, dfscache_proc_show, NULL);
216 }
217
218 const struct proc_ops dfscache_proc_ops = {
219 .proc_open = dfscache_proc_open,
220 .proc_read = seq_read,
221 .proc_lseek = seq_lseek,
222 .proc_release = single_release,
223 .proc_write = dfscache_proc_write,
224 };
225
226 #ifdef CONFIG_CIFS_DEBUG2
227 static inline void dump_tgts(const struct cache_entry *ce)
228 {
229 struct cache_dfs_tgt *t;
230
231 cifs_dbg(FYI, "target list:\n");
232 list_for_each_entry(t, &ce->tlist, list) {
233 cifs_dbg(FYI, " %s%s\n", t->name,
234 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
235 }
236 }
237
238 static inline void dump_ce(const struct cache_entry *ce)
239 {
240 cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
241 ce->path,
242 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
243 ce->etime.tv_nsec,
244 ce->hdr_flags, ce->ref_flags,
245 DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
246 ce->path_consumed,
247 cache_entry_expired(ce) ? "yes" : "no");
248 dump_tgts(ce);
249 }
250
251 static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
252 {
253 int i;
254
255 cifs_dbg(FYI, "DFS referrals returned by the server:\n");
256 for (i = 0; i < numrefs; i++) {
257 const struct dfs_info3_param *ref = &refs[i];
258
259 cifs_dbg(FYI,
260 "\n"
261 "flags: 0x%x\n"
262 "path_consumed: %d\n"
263 "server_type: 0x%x\n"
264 "ref_flag: 0x%x\n"
265 "path_name: %s\n"
266 "node_name: %s\n"
267 "ttl: %d (%dm)\n",
268 ref->flags, ref->path_consumed, ref->server_type,
269 ref->ref_flag, ref->path_name, ref->node_name,
270 ref->ttl, ref->ttl / 60);
271 }
272 }
273 #else
274 #define dump_tgts(e)
275 #define dump_ce(e)
276 #define dump_refs(r, n)
277 #endif
278
279 /**
280 * dfs_cache_init - Initialize DFS referral cache.
281 *
282 * Return zero if initialized successfully, otherwise non-zero.
283 */
284 int dfs_cache_init(void)
285 {
286 int rc;
287 int i;
288
289 dfscache_wq = alloc_workqueue("cifs-dfscache",
290 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
291 0);
292 if (!dfscache_wq)
293 return -ENOMEM;
294
295 cache_slab = kmem_cache_create("cifs_dfs_cache",
296 sizeof(struct cache_entry), 0,
297 SLAB_HWCACHE_ALIGN, NULL);
298 if (!cache_slab) {
299 rc = -ENOMEM;
300 goto out_destroy_wq;
301 }
302
303 for (i = 0; i < CACHE_HTABLE_SIZE; i++)
304 INIT_HLIST_HEAD(&cache_htable[i]);
305
306 atomic_set(&cache_count, 0);
307 atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
308 cache_cp = load_nls("utf8");
309 if (!cache_cp)
310 cache_cp = load_nls_default();
311
312 cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
313 return 0;
314
315 out_destroy_wq:
316 destroy_workqueue(dfscache_wq);
317 return rc;
318 }
319
320 static int cache_entry_hash(const void *data, int size, unsigned int *hash)
321 {
322 int i, clen;
323 const unsigned char *s = data;
324 wchar_t c;
325 unsigned int h = 0;
326
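/* Hash case-insensitively: uppercase each decoded character before mixing it into the hash */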
327 for (i = 0; i < size; i += clen) {
328 clen = cache_cp->char2uni(&s[i], size - i, &c);
329 if (unlikely(clen < 0)) {
330 cifs_dbg(VFS, "%s: can't convert char\n", __func__);
331 return clen;
332 }
333 c = cifs_toupper(c);
334 h = jhash(&c, sizeof(c), h);
335 }
336 *hash = h % CACHE_HTABLE_SIZE;
337 return 0;
338 }
339
340 /* Return target hint of a DFS cache entry */
341 static inline char *get_tgt_name(const struct cache_entry *ce)
342 {
343 struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
344
345 return t ? t->name : ERR_PTR(-ENOENT);
346 }
347
348 /* Compute the expiration time from a new entry's TTL */
349 static inline struct timespec64 get_expire_time(int ttl)
350 {
351 struct timespec64 ts = {
352 .tv_sec = ttl,
353 .tv_nsec = 0,
354 };
355 struct timespec64 now;
356
357 ktime_get_coarse_real_ts64(&now);
358 return timespec64_add(now, ts);
359 }
360
361 /* Allocate a new DFS target */
362 static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
363 {
364 struct cache_dfs_tgt *t;
365
366 t = kmalloc(sizeof(*t), GFP_ATOMIC);
367 if (!t)
368 return ERR_PTR(-ENOMEM);
369 t->name = kstrdup(name, GFP_ATOMIC);
370 if (!t->name) {
371 kfree(t);
372 return ERR_PTR(-ENOMEM);
373 }
374 t->path_consumed = path_consumed;
375 INIT_LIST_HEAD(&t->list);
376 return t;
377 }
378
379 /*
380 * Copy DFS referral information to a cache entry and conditionally update
381 * target hint.
382 */
383 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
384 struct cache_entry *ce, const char *tgthint)
385 {
386 struct cache_dfs_tgt *target;
387 int i;
388
389 ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
390 ce->etime = get_expire_time(ce->ttl);
391 ce->srvtype = refs[0].server_type;
392 ce->hdr_flags = refs[0].flags;
393 ce->ref_flags = refs[0].ref_flag;
394 ce->path_consumed = refs[0].path_consumed;
395
396 for (i = 0; i < numrefs; i++) {
397 struct cache_dfs_tgt *t;
398
399 t = alloc_target(refs[i].node_name, refs[i].path_consumed);
400 if (IS_ERR(t)) {
401 free_tgts(ce);
402 return PTR_ERR(t);
403 }
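/* Keep the previous target hint (if any) at the head of the list so it is reinstated as the hint below */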
404 if (tgthint && !strcasecmp(t->name, tgthint)) {
405 list_add(&t->list, &ce->tlist);
406 tgthint = NULL;
407 } else {
408 list_add_tail(&t->list, &ce->tlist);
409 }
410 ce->numtgts++;
411 }
412
413 target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
414 list);
415 WRITE_ONCE(ce->tgthint, target);
416
417 return 0;
418 }
419
420 /* Allocate a new cache entry */
421 static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
422 {
423 struct cache_entry *ce;
424 int rc;
425
426 ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
427 if (!ce)
428 return ERR_PTR(-ENOMEM);
429
430 ce->path = refs[0].path_name;
431 refs[0].path_name = NULL;
432
433 INIT_HLIST_NODE(&ce->hlist);
434 INIT_LIST_HEAD(&ce->tlist);
435
436 rc = copy_ref_data(refs, numrefs, ce, NULL);
437 if (rc) {
438 kfree(ce->path);
439 kmem_cache_free(cache_slab, ce);
440 ce = ERR_PTR(rc);
441 }
442 return ce;
443 }
444
445 /* Evict all entries that have a single target; if the cache is still full, also evict the oldest entry */
446 static void purge_cache(void)
447 {
448 int i;
449 struct cache_entry *ce;
450 struct cache_entry *oldest = NULL;
451
452 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
453 struct hlist_head *l = &cache_htable[i];
454 struct hlist_node *n;
455
456 hlist_for_each_entry_safe(ce, n, l, hlist) {
457 if (hlist_unhashed(&ce->hlist))
458 continue;
459 if (ce->numtgts == 1)
460 flush_cache_ent(ce);
461 else if (!oldest ||
462 timespec64_compare(&ce->etime,
463 &oldest->etime) < 0)
464 oldest = ce;
465 }
466 }
467
468 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
469 flush_cache_ent(oldest);
470 }
471
472 /* Add a new DFS cache entry */
473 static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
474 int numrefs)
475 {
476 int rc;
477 struct cache_entry *ce;
478 unsigned int hash;
479 int ttl;
480
481 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
482
483 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
484 cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
485 purge_cache();
486 }
487
488 rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
489 if (rc)
490 return ERR_PTR(rc);
491
492 ce = alloc_cache_entry(refs, numrefs);
493 if (IS_ERR(ce))
494 return ce;
495
496 ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
497 atomic_set(&dfs_cache_ttl, ttl);
498
499 hlist_add_head(&ce->hlist, &cache_htable[hash]);
500 dump_ce(ce);
501
502 atomic_inc(&cache_count);
503
504 return ce;
505 }
506
507 /* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
508 static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
509 {
510 int i, l1, l2;
511 wchar_t c1, c2;
512
513 if (len1 != len2)
514 return false;
515
516 for (i = 0; i < len1; i += l1) {
517 l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
518 l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
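/* If neither byte sequence decodes to a valid character, fall back to comparing the raw bytes */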
519 if (unlikely(l1 < 0 && l2 < 0)) {
520 if (s1[i] != s2[i])
521 return false;
522 l1 = 1;
523 continue;
524 }
525 if (l1 != l2)
526 return false;
527 if (cifs_toupper(c1) != cifs_toupper(c2))
528 return false;
529 }
530 return true;
531 }
532
533 static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
534 {
535 struct cache_entry *ce;
536
537 hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
538 if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
539 dump_ce(ce);
540 return ce;
541 }
542 }
543 return ERR_PTR(-ENOENT);
544 }
545
546 /*
547 * Find a DFS cache entry in the hash table; for paths with more than two components, match the longest cached prefix of the normalized @path.
548 *
549 * Use whole path components in the match. Must be called with htable_rw_lock held.
550 *
551 * Return cached entry if successful.
552 * Return ERR_PTR(-ENOENT) if the entry is not found.
553 * Return error ptr otherwise.
554 */
555 static struct cache_entry *lookup_cache_entry(const char *path)
556 {
557 struct cache_entry *ce;
558 int cnt = 0;
559 const char *s = path, *e;
560 char sep = *s;
561 unsigned int hash;
562 int rc;
563
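/* Walk past the first two separators; fewer than three means @path has only two components (\server\share) and is hashed whole */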
564 while ((s = strchr(s, sep)) && ++cnt < 3)
565 s++;
566
567 if (cnt < 3) {
568 rc = cache_entry_hash(path, strlen(path), &hash);
569 if (rc)
570 return ERR_PTR(rc);
571 return __lookup_cache_entry(path, hash, strlen(path));
572 }
573 /*
574 * Handle paths that have more than two path components and are a complete prefix of the DFS
575 * referral request path (@path).
576 *
577 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
578 */
579 e = path + strlen(path) - 1;
580 while (e > s) {
581 int len;
582
583 /* skip separators */
584 while (e > s && *e == sep)
585 e--;
586 if (e == s)
587 break;
588
589 len = e + 1 - path;
590 rc = cache_entry_hash(path, len, &hash);
591 if (rc)
592 return ERR_PTR(rc);
593 ce = __lookup_cache_entry(path, hash, len);
594 if (!IS_ERR(ce))
595 return ce;
596
597 /* backward until separator */
598 while (e > s && *e != sep)
599 e--;
600 }
601 return ERR_PTR(-ENOENT);
602 }
603
604 /**
605 * dfs_cache_destroy - destroy DFS referral cache
606 */
607 void dfs_cache_destroy(void)
608 {
609 unload_nls(cache_cp);
610 flush_cache_ents();
611 kmem_cache_destroy(cache_slab);
612 destroy_workqueue(dfscache_wq);
613
614 cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
615 }
616
617 /* Update a cache entry with the new referral in @refs */
618 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
619 int numrefs)
620 {
621 struct cache_dfs_tgt *target;
622 char *th = NULL;
623 int rc;
624
625 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
626
627 target = READ_ONCE(ce->tgthint);
628 if (target) {
629 th = kstrdup(target->name, GFP_ATOMIC);
630 if (!th)
631 return -ENOMEM;
632 }
633
634 free_tgts(ce);
635 ce->numtgts = 0;
636
637 rc = copy_ref_data(refs, numrefs, ce, th);
638
639 kfree(th);
640
641 return rc;
642 }
643
644 static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
645 struct dfs_info3_param **refs, int *numrefs)
646 {
647 int rc;
648 int i;
649
650 *refs = NULL;
651 *numrefs = 0;
652
653 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
654 return -EOPNOTSUPP;
655 if (unlikely(!cache_cp))
656 return -EINVAL;
657
658 cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
659 rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
660 NO_MAP_UNI_RSVD);
661 if (!rc) {
662 struct dfs_info3_param *ref = *refs;
663
664 for (i = 0; i < *numrefs; i++)
665 convert_delimiter(ref[i].path_name, '\\');
666 }
667 return rc;
668 }
669
670 /*
671 * Find, create or update a DFS cache entry.
672 *
673 * If the entry is not found, a new one is created. If it is found but has
674 * expired, it is updated with a fresh referral.
675 *
676 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
677 * handle them properly.
678 *
679 * On success, return entry with acquired lock for reading, otherwise error ptr.
680 */
681 static struct cache_entry *cache_refresh_path(const unsigned int xid,
682 struct cifs_ses *ses,
683 const char *path,
684 bool force_refresh)
685 {
686 struct dfs_info3_param *refs = NULL;
687 struct cache_entry *ce;
688 int numrefs = 0;
689 int rc;
690
691 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
692
693 down_read(&htable_rw_lock);
694
695 ce = lookup_cache_entry(path);
696 if (!IS_ERR(ce)) {
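/* A fresh entry is returned with htable_rw_lock still held for reading, as promised to callers */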
697 if (!force_refresh && !cache_entry_expired(ce))
698 return ce;
699 } else if (PTR_ERR(ce) != -ENOENT) {
700 up_read(&htable_rw_lock);
701 return ce;
702 }
703
704 /*
705 * Unlock shared access as we don't want to hold any locks while getting
706 * a new referral. The @ses used for performing the I/O could be
707 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
708 * in order to failover -- if necessary.
709 */
710 up_read(&htable_rw_lock);
711
712 /*
713 * Either the entry was not found, or it is expired, or it is a forced
714 * refresh.
715 * Request a new DFS referral in order to create or update a cache entry.
716 */
717 rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
718 if (rc) {
719 ce = ERR_PTR(rc);
720 goto out;
721 }
722
723 dump_refs(refs, numrefs);
724
725 down_write(&htable_rw_lock);
726 /* Re-check as another task might have it added or refreshed already */
727 ce = lookup_cache_entry(path);
728 if (!IS_ERR(ce)) {
729 if (force_refresh || cache_entry_expired(ce)) {
730 rc = update_cache_entry_locked(ce, refs, numrefs);
731 if (rc)
732 ce = ERR_PTR(rc);
733 }
734 } else if (PTR_ERR(ce) == -ENOENT) {
735 ce = add_cache_entry_locked(refs, numrefs);
736 }
737
738 if (IS_ERR(ce)) {
739 up_write(&htable_rw_lock);
740 goto out;
741 }
742
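/* Drop to a read lock so the entry is returned with htable_rw_lock held for reading */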
743 downgrade_write(&htable_rw_lock);
744 out:
745 free_dfs_info_array(refs, numrefs);
746 return ce;
747 }
748
749 /*
750 * Set up a DFS referral from a given cache entry.
751 *
752 * Must be called with htable_rw_lock held.
753 */
754 static int setup_referral(const char *path, struct cache_entry *ce,
755 struct dfs_info3_param *ref, const char *target)
756 {
757 int rc;
758
759 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
760
761 memset(ref, 0, sizeof(*ref));
762
763 ref->path_name = kstrdup(path, GFP_ATOMIC);
764 if (!ref->path_name)
765 return -ENOMEM;
766
767 ref->node_name = kstrdup(target, GFP_ATOMIC);
768 if (!ref->node_name) {
769 rc = -ENOMEM;
770 goto err_free_path;
771 }
772
773 ref->path_consumed = ce->path_consumed;
774 ref->ttl = ce->ttl;
775 ref->server_type = ce->srvtype;
776 ref->ref_flag = ce->ref_flags;
777 ref->flags = ce->hdr_flags;
778
779 return 0;
780
781 err_free_path:
782 kfree(ref->path_name);
783 ref->path_name = NULL;
784 return rc;
785 }
786
787 /* Return target list of a DFS cache entry */
788 static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
789 {
790 int rc;
791 struct list_head *head = &tl->tl_list;
792 struct cache_dfs_tgt *t;
793 struct dfs_cache_tgt_iterator *it, *nit;
794
795 memset(tl, 0, sizeof(*tl));
796 INIT_LIST_HEAD(head);
797
798 list_for_each_entry(t, &ce->tlist, list) {
799 it = kzalloc(sizeof(*it), GFP_ATOMIC);
800 if (!it) {
801 rc = -ENOMEM;
802 goto err_free_it;
803 }
804
805 it->it_name = kstrdup(t->name, GFP_ATOMIC);
806 if (!it->it_name) {
807 kfree(it);
808 rc = -ENOMEM;
809 goto err_free_it;
810 }
811 it->it_path_consumed = t->path_consumed;
812
813 if (READ_ONCE(ce->tgthint) == t)
814 list_add(&it->it_list, head);
815 else
816 list_add_tail(&it->it_list, head);
817 }
818
819 tl->tl_numtgts = ce->numtgts;
820
821 return 0;
822
823 err_free_it:
824 list_for_each_entry_safe(it, nit, head, it_list) {
825 list_del(&it->it_list);
826 kfree(it->it_name);
827 kfree(it);
828 }
829 return rc;
830 }
831
832 /**
833 * dfs_cache_find - find a DFS cache entry
834 *
835 * If it doesn't find the cache entry, then it will get a DFS referral
836 * for @path and create a new entry.
837 *
838 * In case the cache entry exists but expired, it will get a DFS referral
839 * for @path and then update the respective cache entry.
840 *
841 * These parameters are passed down to the get_dfs_refer() call if it
842 * needs to be issued:
843 * @xid: syscall xid
844 * @ses: smb session to issue the request on
845 * @cp: codepage
846 * @remap: path character remapping type
847 * @path: path to lookup in DFS referral cache.
848 *
849 * @ref: when non-NULL, store single DFS referral result in it.
850 * @tgt_list: when non-NULL, store complete DFS target list in it.
851 *
852 * Return zero if the target was found, otherwise non-zero.
853 */
854 int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
855 int remap, const char *path, struct dfs_info3_param *ref,
856 struct dfs_cache_tgt_list *tgt_list)
857 {
858 int rc;
859 const char *npath;
860 struct cache_entry *ce;
861
862 npath = dfs_cache_canonical_path(path, cp, remap);
863 if (IS_ERR(npath))
864 return PTR_ERR(npath);
865
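/* On success, cache_refresh_path() returns with htable_rw_lock held for read; it is released below */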
866 ce = cache_refresh_path(xid, ses, npath, false);
867 if (IS_ERR(ce)) {
868 rc = PTR_ERR(ce);
869 goto out_free_path;
870 }
871
872 if (ref)
873 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
874 else
875 rc = 0;
876 if (!rc && tgt_list)
877 rc = get_targets(ce, tgt_list);
878
879 up_read(&htable_rw_lock);
880
881 out_free_path:
882 kfree(npath);
883 return rc;
884 }
885
886 /**
887 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
888 * the currently connected server.
889 *
890 * NOTE: This function will neither update a cache entry in case it was
891 * expired, nor create a new cache entry if @path hasn't been found. It heavily
892 * relies on an existing cache entry.
893 *
894 * @path: canonical DFS path to lookup in the DFS referral cache.
895 * @ref: when non-NULL, store single DFS referral result in it.
896 * @tgt_list: when non-NULL, store complete DFS target list in it.
897 *
898 * Return 0 if successful.
899 * Return -ENOENT if the entry was not found.
900 * Return non-zero for other errors.
901 */
902 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
903 struct dfs_cache_tgt_list *tgt_list)
904 {
905 int rc;
906 struct cache_entry *ce;
907
908 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
909
910 down_read(&htable_rw_lock);
911
912 ce = lookup_cache_entry(path);
913 if (IS_ERR(ce)) {
914 rc = PTR_ERR(ce);
915 goto out_unlock;
916 }
917
918 if (ref)
919 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
920 else
921 rc = 0;
922 if (!rc && tgt_list)
923 rc = get_targets(ce, tgt_list);
924
925 out_unlock:
926 up_read(&htable_rw_lock);
927 return rc;
928 }
929
930 /**
931 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
932 * without sending any requests to the currently connected server.
933 *
934 * NOTE: This function will neither update a cache entry in case it was
935 * expired, nor create a new cache entry if @path hasn't been found. It heavily
936 * relies on an existing cache entry.
937 *
938 * @path: canonical DFS path to lookup in DFS referral cache.
939 * @it: target iterator which contains the target hint to update the cache
940 * entry with.
941 *
942 * The cache entry is left unchanged if @path is not found.
943 */
944 void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
945 {
946 struct cache_dfs_tgt *t;
947 struct cache_entry *ce;
948
949 if (!path || !it)
950 return;
951
952 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
953
954 down_read(&htable_rw_lock);
955
956 ce = lookup_cache_entry(path);
957 if (IS_ERR(ce))
958 goto out_unlock;
959
960 t = READ_ONCE(ce->tgthint);
961
962 if (unlikely(!strcasecmp(it->it_name, t->name)))
963 goto out_unlock;
964
965 list_for_each_entry(t, &ce->tlist, list) {
966 if (!strcasecmp(t->name, it->it_name)) {
967 WRITE_ONCE(ce->tgthint, t);
968 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
969 it->it_name);
970 break;
971 }
972 }
973
974 out_unlock:
975 up_read(&htable_rw_lock);
976 }
977
978 /**
979 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
980 * target iterator (@it).
981 *
982 * @path: canonical DFS path to lookup in DFS referral cache.
983 * @it: DFS target iterator.
984 * @ref: DFS referral pointer to set up the gathered information.
985 *
986 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
987 */
988 int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
989 struct dfs_info3_param *ref)
990 {
991 int rc;
992 struct cache_entry *ce;
993
994 if (!it || !ref)
995 return -EINVAL;
996
997 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
998
999 down_read(&htable_rw_lock);
1000
1001 ce = lookup_cache_entry(path);
1002 if (IS_ERR(ce)) {
1003 rc = PTR_ERR(ce);
1004 goto out_unlock;
1005 }
1006
1007 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1008
1009 rc = setup_referral(path, ce, ref, it->it_name);
1010
1011 out_unlock:
1012 up_read(&htable_rw_lock);
1013 return rc;
1014 }
1015
1016 /* Extract the \\server\share portion of a DFS target into @share and return a pointer to its prefix path (possibly empty), or an ERR_PTR on error */
1017 static const char *parse_target_share(const char *target, char **share)
1018 {
1019 const char *s, *seps = "/\\";
1020 size_t len;
1021
1022 s = strpbrk(target + 1, seps);
1023 if (!s)
1024 return ERR_PTR(-EINVAL);
1025
1026 len = strcspn(s + 1, seps);
1027 if (!len)
1028 return ERR_PTR(-EINVAL);
1029 s += len;
1030
1031 len = s - target + 1;
1032 *share = kstrndup(target, len, GFP_KERNEL);
1033 if (!*share)
1034 return ERR_PTR(-ENOMEM);
1035
1036 s = target + len;
1037 return s + strspn(s, seps);
1038 }
1039
1040 /**
1041 * dfs_cache_get_tgt_share - parse a DFS target
1042 *
1043 * @path: DFS full path
1044 * @it: DFS target iterator.
1045 * @share: tree name.
1046 * @prefix: prefix path.
1047 *
1048 * Return zero if target was parsed correctly, otherwise non-zero.
1049 */
1050 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
1051 char **prefix)
1052 {
1053 char sep;
1054 char *target_share;
1055 char *ppath = NULL;
1056 const char *target_ppath, *dfsref_ppath;
1057 size_t target_pplen, dfsref_pplen;
1058 size_t len, c;
1059
1060 if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
1061 return -EINVAL;
1062
1063 sep = it->it_name[0];
1064 if (sep != '\\' && sep != '/')
1065 return -EINVAL;
1066
1067 target_ppath = parse_target_share(it->it_name, &target_share);
1068 if (IS_ERR(target_ppath))
1069 return PTR_ERR(target_ppath);
1070
1071 /* point to prefix in DFS referral path */
1072 dfsref_ppath = path + it->it_path_consumed;
1073 dfsref_ppath += strspn(dfsref_ppath, "/\\");
1074
1075 target_pplen = strlen(target_ppath);
1076 dfsref_pplen = strlen(dfsref_ppath);
1077
1078 /* merge prefix paths from DFS referral path and target node */
1079 if (target_pplen || dfsref_pplen) {
1080 len = target_pplen + dfsref_pplen + 2;
1081 ppath = kzalloc(len, GFP_KERNEL);
1082 if (!ppath) {
1083 kfree(target_share);
1084 return -ENOMEM;
1085 }
1086 c = strscpy(ppath, target_ppath, len);
1087 if (c && dfsref_pplen)
1088 ppath[c] = sep;
1089 strlcat(ppath, dfsref_ppath, len);
1090 }
1091 *share = target_share;
1092 *prefix = ppath;
1093 return 0;
1094 }
1095
1096 static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
1097 {
1098 struct TCP_Server_Info *server = tcon->ses->server;
1099 struct sockaddr_storage ss;
1100 const char *host;
1101 const char *s2 = &tcon->tree_name[1];
1102 size_t hostlen;
1103 char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
1104 bool match;
1105 int rc;
1106
1107 if (strcasecmp(s2, s1))
1108 return false;
1109
1110 /*
1111 * Resolve the share's hostname and check whether the server address matches. If the hostname
1112 * cannot be resolved (no upcall available) or the address cannot be converted, assume it matches.
1113 */
1114 extract_unc_hostname(s1, &host, &hostlen);
1115 scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
1116
1117 rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
1118 if (rc < 0) {
1119 cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
1120 __func__, (int)hostlen, host);
1121 return true;
1122 }
1123
1124 cifs_server_lock(server);
1125 match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1126 cifs_server_unlock(server);
1127
1128 return match;
1129 }
1130
1131 static bool is_ses_good(struct cifs_ses *ses)
1132 {
1133 struct TCP_Server_Info *server = ses->server;
1134 struct cifs_tcon *tcon = ses->tcon_ipc;
1135 bool ret;
1136
1137 spin_lock(&ses->ses_lock);
1138 spin_lock(&ses->chan_lock);
1139 ret = !cifs_chan_needs_reconnect(ses, server) &&
1140 ses->ses_status == SES_GOOD &&
1141 !tcon->need_reconnect;
1142 spin_unlock(&ses->chan_lock);
1143 spin_unlock(&ses->ses_lock);
1144 return ret;
1145 }
1146
1147 static char *get_ses_refpath(struct cifs_ses *ses)
1148 {
1149 struct TCP_Server_Info *server = ses->server;
1150 char *path = ERR_PTR(-ENOENT);
1151
1152 mutex_lock(&server->refpath_lock);
1153 if (server->leaf_fullpath) {
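/* Skip the leading backslash of the stored UNC so the path matches the single-backslash '\server\share...' form used as cache keys */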
1154 path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
1155 if (!path)
1156 path = ERR_PTR(-ENOMEM);
1157 }
1158 mutex_unlock(&server->refpath_lock);
1159 return path;
1160 }
1161
1162 /* Refresh dfs referral of @ses */
1163 static void refresh_ses_referral(struct cifs_ses *ses)
1164 {
1165 struct cache_entry *ce;
1166 unsigned int xid;
1167 char *path;
1168 int rc = 0;
1169
1170 xid = get_xid();
1171
1172 path = get_ses_refpath(ses);
1173 if (IS_ERR(path)) {
1174 rc = PTR_ERR(path);
1175 path = NULL;
1176 goto out;
1177 }
1178
1179 ses = CIFS_DFS_ROOT_SES(ses);
1180 if (!is_ses_good(ses)) {
1181 cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
1182 __func__);
1183 goto out;
1184 }
1185
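/* cache_refresh_path() returns with htable_rw_lock held for read on success; only the refresh side effect is wanted here, so drop the lock right away */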
1186 ce = cache_refresh_path(xid, ses, path, false);
1187 if (!IS_ERR(ce))
1188 up_read(&htable_rw_lock);
1189 else
1190 rc = PTR_ERR(ce);
1191
1192 out:
1193 free_xid(xid);
1194 kfree(path);
1195 }
1196
1197 static int __refresh_tcon_referral(struct cifs_tcon *tcon,
1198 const char *path,
1199 struct dfs_info3_param *refs,
1200 int numrefs, bool force_refresh)
1201 {
1202 struct cache_entry *ce;
1203 bool reconnect = force_refresh;
1204 int rc = 0;
1205 int i;
1206
1207 if (unlikely(!numrefs))
1208 return 0;
1209
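/* On forced refresh, keep the current connection only if the connected share matches one of the new targets; otherwise mark it for reconnect below */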
1210 if (force_refresh) {
1211 for (i = 0; i < numrefs; i++) {
1212 /* TODO: include prefix paths in the matching */
1213 if (target_share_equal(tcon, refs[i].node_name)) {
1214 reconnect = false;
1215 break;
1216 }
1217 }
1218 }
1219
1220 down_write(&htable_rw_lock);
1221 ce = lookup_cache_entry(path);
1222 if (!IS_ERR(ce)) {
1223 if (force_refresh || cache_entry_expired(ce))
1224 rc = update_cache_entry_locked(ce, refs, numrefs);
1225 } else if (PTR_ERR(ce) == -ENOENT) {
1226 ce = add_cache_entry_locked(refs, numrefs);
1227 }
1228 up_write(&htable_rw_lock);
1229
1230 if (IS_ERR(ce))
1231 rc = PTR_ERR(ce);
1232 if (reconnect) {
1233 cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
1234 cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
1235 }
1236 return rc;
1237 }
1238
1239 static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
1240 {
1241 struct dfs_info3_param *refs = NULL;
1242 struct cache_entry *ce;
1243 struct cifs_ses *ses;
1244 unsigned int xid;
1245 bool needs_refresh;
1246 char *path;
1247 int numrefs = 0;
1248 int rc = 0;
1249
1250 xid = get_xid();
1251 ses = tcon->ses;
1252
1253 path = get_ses_refpath(ses);
1254 if (IS_ERR(path)) {
1255 rc = PTR_ERR(path);
1256 path = NULL;
1257 goto out;
1258 }
1259
1260 down_read(&htable_rw_lock);
1261 ce = lookup_cache_entry(path);
1262 needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
1263 if (!needs_refresh) {
1264 up_read(&htable_rw_lock);
1265 goto out;
1266 }
1267 up_read(&htable_rw_lock);
1268
1269 ses = CIFS_DFS_ROOT_SES(ses);
1270 if (!is_ses_good(ses)) {
1271 cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
1272 __func__);
1273 goto out;
1274 }
1275
1276 rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
1277 if (!rc) {
1278 rc = __refresh_tcon_referral(tcon, path, refs,
1279 numrefs, force_refresh);
1280 }
1281
1282 out:
1283 free_xid(xid);
1284 kfree(path);
1285 free_dfs_info_array(refs, numrefs);
1286 }
1287
1288 /**
1289 * dfs_cache_remount_fs - remount a DFS share
1290 *
1291 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
1292 * match any of the new targets, mark it for reconnect.
1293 *
1294 * @cifs_sb: cifs superblock.
1295 *
1296 * Return zero if remounted, otherwise non-zero.
1297 */
1298 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
1299 {
1300 struct cifs_tcon *tcon;
1301
1302 if (!cifs_sb || !cifs_sb->master_tlink)
1303 return -EINVAL;
1304
1305 tcon = cifs_sb_master_tcon(cifs_sb);
1306
1307 spin_lock(&tcon->tc_lock);
1308 if (!tcon->origin_fullpath) {
1309 spin_unlock(&tcon->tc_lock);
1310 cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
1311 return 0;
1312 }
1313 spin_unlock(&tcon->tc_lock);
1314
1315 /*
1316 * After reconnecting to a different server, unique ids won't match anymore, so we disable
1317 * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE).
1318 */
1319 cifs_autodisable_serverino(cifs_sb);
1320 /*
1321 * Force the use of prefix path to support failover on DFS paths that resolve to targets
1322 * that have different prefix paths.
1323 */
1324 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1325
1326 refresh_tcon_referral(tcon, true);
1327 return 0;
1328 }
1329
1330 /* Refresh all DFS referrals related to DFS tcon */
1331 void dfs_cache_refresh(struct work_struct *work)
1332 {
1333 struct cifs_tcon *tcon;
1334 struct cifs_ses *ses;
1335
1336 tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
1337
1338 list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
1339 refresh_ses_referral(ses);
1340 refresh_tcon_referral(tcon, false);
1341
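/* Reschedule using the smallest TTL seen across cached referrals (in seconds) */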
1342 queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
1343 atomic_read(&dfs_cache_ttl) * HZ);
1344 }
1345