xref: /linux/fs/autofs/waitq.c (revision 9a4e47ef98a3041f6d2869ba2cd3401701776275)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
 */

#include <linux/sched/signal.h>
#include "autofs_i.h"

/* We make this a static variable rather than a part of the superblock; it
 * is better if we don't reassign numbers easily even across filesystems
 */
static autofs_wqt_t autofs_next_wait_queue = 1;

void autofs_catatonic_mode(struct autofs_sb_info *sbi)
{
	struct autofs_wait_queue *wq, *nwq;

	mutex_lock(&sbi->wq_mutex);
	if (sbi->flags & AUTOFS_SBI_CATATONIC) {
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pr_debug("entering catatonic mode\n");

	sbi->flags |= AUTOFS_SBI_CATATONIC;
	wq = sbi->queues;
	sbi->queues = NULL;	/* Erase all wait queues */
	while (wq) {
		nwq = wq->next;
		wq->status = -ENOENT; /* Magic is gone - report failure */
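		/* name.name points wq->offset bytes into the buffer that
		 * was allocated in autofs_wait(), so step back to the
		 * start of that allocation before freeing it.
		 */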
		kfree(wq->name.name - wq->offset);
		wq->name.name = NULL;
		wake_up(&wq->queue);
		if (!--wq->wait_ctr)
			kfree(wq);
		wq = nwq;
	}
	fput(sbi->pipe);	/* Close the pipe */
	sbi->pipe = NULL;
	sbi->pipefd = -1;
	mutex_unlock(&sbi->wq_mutex);
}

static int autofs_write(struct autofs_sb_info *sbi,
			struct file *file, const void *addr, int bytes)
{
	unsigned long sigpipe, flags;
	const char *data = (const char *)addr;
	ssize_t wr = 0;

	sigpipe = sigismember(&current->pending.signal, SIGPIPE);

	mutex_lock(&sbi->pipe_mutex);
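	/* The pipe may accept only part of the packet at a time, so
	 * loop until the whole packet is written or an error occurs.
	 */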
	while (bytes) {
		wr = __kernel_write(file, data, bytes, NULL);
		if (wr <= 0)
			break;
		data += wr;
		bytes -= wr;
	}
	mutex_unlock(&sbi->pipe_mutex);

	/* Keep the currently executing process from receiving a
	 * SIGPIPE unless it was already supposed to get one
	 */
	if (wr == -EPIPE && !sigpipe) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->pending.signal, SIGPIPE);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
}

static void autofs_notify_daemon(struct autofs_sb_info *sbi,
				 struct autofs_wait_queue *wq,
				 int type)
{
	union {
		struct autofs_packet_hdr hdr;
		union autofs_packet_union v4_pkt;
		union autofs_v5_packet_union v5_pkt;
	} pkt;
	struct file *pipe = NULL;
	size_t pktsz;
	int ret;

	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
		 (unsigned long) wq->wait_queue_token,
		 wq->name.len, wq->name.name, type);

	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */

	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = type;

	switch (type) {
	/* Kernel protocol v4 missing and expire packets */
	case autofs_ptype_missing:
	{
		struct autofs_packet_missing *mp = &pkt.v4_pkt.missing;

		pktsz = sizeof(*mp);

		mp->wait_queue_token = wq->wait_queue_token;
		mp->len = wq->name.len;
		memcpy(mp->name, wq->name.name, wq->name.len);
		mp->name[wq->name.len] = '\0';
		break;
	}
	case autofs_ptype_expire_multi:
	{
		struct autofs_packet_expire_multi *ep =
					&pkt.v4_pkt.expire_multi;

		pktsz = sizeof(*ep);

		ep->wait_queue_token = wq->wait_queue_token;
		ep->len = wq->name.len;
		memcpy(ep->name, wq->name.name, wq->name.len);
		ep->name[wq->name.len] = '\0';
		break;
	}
	/*
	 * Kernel protocol v5 packet for handling indirect and direct
	 * mount missing and expire requests
	 */
	case autofs_ptype_missing_indirect:
	case autofs_ptype_expire_indirect:
	case autofs_ptype_missing_direct:
	case autofs_ptype_expire_direct:
	{
		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
		struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns;

		pktsz = sizeof(*packet);

		packet->wait_queue_token = wq->wait_queue_token;
		packet->len = wq->name.len;
		memcpy(packet->name, wq->name.name, wq->name.len);
		packet->name[wq->name.len] = '\0';
		packet->dev = wq->dev;
		packet->ino = wq->ino;
		packet->uid = from_kuid_munged(user_ns, wq->uid);
		packet->gid = from_kgid_munged(user_ns, wq->gid);
		packet->pid = wq->pid;
		packet->tgid = wq->tgid;
		break;
	}
	default:
		pr_warn("bad type %d!\n", type);
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

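	/* Take an extra reference on the pipe: once wq_mutex is dropped,
	 * autofs_catatonic_mode() could close and release sbi->pipe while
	 * we are still writing to it.
	 */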
	pipe = get_file(sbi->pipe);

	mutex_unlock(&sbi->wq_mutex);

	switch (ret = autofs_write(sbi, pipe, &pkt, pktsz)) {
	case 0:
		break;
	case -ENOMEM:
	case -ERESTARTSYS:
		/* Just fail this one */
		autofs_wait_release(sbi, wq->wait_queue_token, ret);
		break;
	default:
		autofs_catatonic_mode(sbi);
		break;
	}
	fput(pipe);
}

static struct autofs_wait_queue *
autofs_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
	struct autofs_wait_queue *wq;

	for (wq = sbi->queues; wq; wq = wq->next) {
		if (wq->name.hash == qstr->hash &&
		    wq->name.len == qstr->len &&
		    wq->name.name &&
		    !memcmp(wq->name.name, qstr->name, qstr->len))
			break;
	}
	return wq;
}

/*
 * Check if we have a valid request.
 * Returns
 * 1 if the request should continue.
 *   In this case we can return an autofs_wait_queue entry if one is
 *   found or NULL to indicate a new wait needs to be created.
 * 0 or a negative errno if the request shouldn't continue.
 */
static int validate_request(struct autofs_wait_queue **wait,
			    struct autofs_sb_info *sbi,
			    const struct qstr *qstr,
			    const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct autofs_info *ino;

	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/* Wait in progress, continue; */
	wq = autofs_find_wait(sbi, qstr);
	if (wq) {
		*wait = wq;
		return 1;
	}

	*wait = NULL;

	/* If we don't yet have any info this is a new request */
	ino = autofs_dentry_ino(dentry);
	if (!ino)
		return 1;

	/*
	 * If we've been asked to wait on an existing expire (NFY_NONE)
	 * but there is no wait in the queue ...
	 */
	if (notify == NFY_NONE) {
		/*
		 * Either we've beaten the pending expire to posting its
		 * wait or it finished while we waited on the mutex.
		 * So we need to wait until either the wait appears
		 * or the expire finishes.
		 */

		while (ino->flags & AUTOFS_INF_EXPIRING) {
			mutex_unlock(&sbi->wq_mutex);
			schedule_timeout_interruptible(HZ/10);
			if (mutex_lock_interruptible(&sbi->wq_mutex))
				return -EINTR;

			if (sbi->flags & AUTOFS_SBI_CATATONIC)
				return -ENOENT;

			wq = autofs_find_wait(sbi, qstr);
			if (wq) {
				*wait = wq;
				return 1;
			}
		}

		/*
		 * Not ideal but the status has already gone. Of the two
		 * cases where we wait on NFY_NONE neither depends on the
		 * return status of the wait.
		 */
		return 0;
	}

	/*
	 * If we've been asked to trigger a mount and the request
	 * completed while we waited on the mutex ...
	 */
	if (notify == NFY_MOUNT) {
		struct dentry *new = NULL;
		struct path this;
		int valid = 1;

		/*
		 * If the dentry was successfully mounted while we slept
		 * on the wait queue mutex we can return success. If it
		 * isn't mounted (doesn't have submounts for the case of
		 * a multi-mount with no mount at its base) we can
		 * continue on and create a new request.
		 */
		if (!IS_ROOT(dentry)) {
			if (d_unhashed(dentry) &&
			    d_really_is_positive(dentry)) {
				struct dentry *parent = dentry->d_parent;

				new = d_lookup(parent, &dentry->d_name);
				if (new)
					dentry = new;
			}
		}
		this.mnt = path->mnt;
		this.dentry = dentry;
		if (path_has_submounts(&this))
			valid = 0;

		if (new)
			dput(new);
		return valid;
	}

	return 1;
}

int autofs_wait(struct autofs_sb_info *sbi,
		 const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct qstr qstr;
	char *name;
	int status, ret, type;
	unsigned int offset = 0;
	pid_t pid;
	pid_t tgid;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->flags & AUTOFS_SBI_CATATONIC)
		return -ENOENT;

	/*
	 * Try translating pids to the namespace of the daemon.
	 *
	 * Zero means failure: we are in an unrelated pid namespace.
	 */
	pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	if (pid == 0 || tgid == 0)
		return -ENOENT;

	if (d_really_is_negative(dentry)) {
		/*
		 * A wait for a negative dentry is invalid for certain
		 * cases. A direct or offset mount "always" has its mount
		 * point directory created and so the request dentry must
		 * be positive or the map key doesn't exist. The situation
		 * is very similar for indirect mounts except only dentries
		 * in the root of the autofs file system may be negative.
		 */
		if (autofs_type_trigger(sbi->type))
			return -ENOENT;
		else if (!IS_ROOT(dentry->d_parent))
			return -ENOENT;
	}

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* If this is a direct mount request create a dummy name */
	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type)) {
		qstr.name = name;
		qstr.len = sprintf(name, "%p", dentry);
	} else {
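		/*
		 * dentry_path_raw() builds the path from the end of the
		 * buffer, so the returned pointer lands somewhere inside
		 * the allocation; remember its offset so the original
		 * kmalloc'd pointer can be recovered for kfree() later.
		 */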
		char *p = dentry_path_raw(dentry, name, NAME_MAX);
		if (IS_ERR(p)) {
			kfree(name);
			return -ENOENT;
		}
		qstr.name = ++p; // skip the leading slash
		qstr.len = strlen(p);
		offset = p - name;
	}
	qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(name);
		return -EINTR;
	}

	ret = validate_request(&wq, sbi, &qstr, path, notify);
	if (ret <= 0) {
		if (ret != -EINTR)
			mutex_unlock(&sbi->wq_mutex);
		kfree(name);
		return ret;
	}

	if (!wq) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL);
		if (!wq) {
			kfree(name);
			mutex_unlock(&sbi->wq_mutex);
			return -ENOMEM;
		}

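		/* Hand out the next wait token, skipping 0 on wrap-around
		 * so a token of zero is never issued.
		 */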
		wq->wait_queue_token = autofs_next_wait_queue;
		if (++autofs_next_wait_queue == 0)
			autofs_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		memcpy(&wq->name, &qstr, sizeof(struct qstr));
		wq->offset = offset;
		wq->dev = autofs_get_dev(sbi);
		wq->ino = autofs_get_ino(sbi);
		wq->uid = current_uid();
		wq->gid = current_gid();
		wq->pid = pid;
		wq->tgid = tgid;
		wq->status = -EINTR; /* Status return if interrupted */
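		/* Two references: one for this waiter and one for whoever
		 * releases the wait (autofs_wait_release() or catatonic
		 * mode); the last one to drop its reference frees wq.
		 */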
		wq->wait_ctr = 2;

		if (sbi->version < 5) {
			if (notify == NFY_MOUNT)
				type = autofs_ptype_missing;
			else
				type = autofs_ptype_expire_multi;
		} else {
			if (notify == NFY_MOUNT)
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_missing_direct :
					autofs_ptype_missing_indirect;
			else
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_expire_direct :
					autofs_ptype_expire_indirect;
		}

		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);

		/*
		 * autofs_notify_daemon() may block; it will unlock ->wq_mutex
		 */
		autofs_notify_daemon(sbi, wq, type);
	} else {
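		/* A wait for this name is already queued; take an extra
		 * reference and wait on it rather than notifying the
		 * daemon again.
		 */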
		wq->wait_ctr++;
		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);
		mutex_unlock(&sbi->wq_mutex);
		kfree(name);
	}

	/*
	 * wq->name.name is NULL iff the lock is already released
	 * or the mount has been made catatonic.
	 */
	wait_event_killable(wq->queue, wq->name.name == NULL);
	status = wq->status;

	/*
	 * For direct and offset mounts we need to track the requester's
	 * uid and gid in the dentry info struct. This is so it can be
	 * supplied, on request, by the misc device ioctl interface.
	 * This is needed during daemon restart when reconnecting
	 * to existing, active, autofs mounts. The uid and gid (and
	 * related string values) may be used for macro substitution
	 * in autofs mount maps.
	 */
	if (!status) {
		struct autofs_info *ino;
		struct dentry *de = NULL;

		/* direct mount or browsable map */
		ino = autofs_dentry_ino(dentry);
		if (!ino) {
			/* If not, look up the actual dentry that was used */
			de = d_lookup(dentry->d_parent, &dentry->d_name);
			if (de)
				ino = autofs_dentry_ino(de);
		}

		/* Set mount requester */
		if (ino) {
			spin_lock(&sbi->fs_lock);
			ino->uid = wq->uid;
			ino->gid = wq->gid;
			spin_unlock(&sbi->fs_lock);
		}

		if (de)
			dput(de);
	}

	/* Are we the last process to need status? */
	mutex_lock(&sbi->wq_mutex);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return status;
}

int autofs_wait_release(struct autofs_sb_info *sbi,
			autofs_wqt_t wait_queue_token, int status)
{
	struct autofs_wait_queue *wq, **wql;

	mutex_lock(&sbi->wq_mutex);
	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
		if (wq->wait_queue_token == wait_queue_token)
			break;
	}

	if (!wq) {
		mutex_unlock(&sbi->wq_mutex);
		return -EINVAL;
	}

	*wql = wq->next;	/* Unlink from chain */
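	/* Step back over the name's offset within its buffer to free the
	 * original allocation made in autofs_wait().
	 */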
	kfree(wq->name.name - wq->offset);
	wq->name.name = NULL;	/* Do not wait on this queue */
	wq->status = status;
	wake_up(&wq->queue);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return 0;
}