Lines Matching full:fc
83 void fuse_set_initialized(struct fuse_conn *fc) in fuse_set_initialized() argument
87 fc->initialized = 1; in fuse_set_initialized()
90 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) in fuse_block_alloc() argument
92 return !fc->initialized || (for_background && fc->blocked); in fuse_block_alloc()
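Taken together, fuse_set_initialized() and fuse_block_alloc() gate request allocation: fc->initialized only flips once the INIT reply has been handled, and background allocations additionally back off while fc->blocked is set. A minimal sketch of how the fragments above fit together, reconstructed from the matched lines plus the usual upstream layout (the smp_wmb() is not in the match output and is assumed from that layout):

void fuse_set_initialized(struct fuse_conn *fc)
{
        /* Make sure stores before this are seen on another CPU */
        smp_wmb();
        fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
        /* Wait for INIT to be answered; background requests also wait
         * while the connection is throttled (fc->blocked). */
        return !fc->initialized || (for_background && fc->blocked);
}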
95 static void fuse_drop_waiting(struct fuse_conn *fc) in fuse_drop_waiting() argument
98 * lockless check of fc->connected is okay, because atomic_dec_and_test() in fuse_drop_waiting()
102 if (atomic_dec_and_test(&fc->num_waiting) && in fuse_drop_waiting()
103 !READ_ONCE(fc->connected)) { in fuse_drop_waiting()
105 wake_up_all(&fc->blocked_waitq); in fuse_drop_waiting()
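fuse_drop_waiting() pairs with the atomic_inc(&fc->num_waiting) done at allocation time; the matched lines above give most of its body. A sketch, assuming the standard upstream shape:

static void fuse_drop_waiting(struct fuse_conn *fc)
{
        /*
         * lockless check of fc->connected is okay, because
         * atomic_dec_and_test() provides a memory barrier matched with
         * the one in fuse_wait_aborted() to ensure no wake-up is missed.
         */
        if (atomic_dec_and_test(&fc->num_waiting) &&
            !READ_ONCE(fc->connected)) {
                /* wake up aborters */
                wake_up_all(&fc->blocked_waitq);
        }
}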
115 struct fuse_conn *fc = fm->fc; in fuse_get_req() local
122 atomic_inc(&fc->num_waiting); in fuse_get_req()
124 if (fuse_block_alloc(fc, for_background)) { in fuse_get_req()
126 if (wait_event_killable_exclusive(fc->blocked_waitq, in fuse_get_req()
127 !fuse_block_alloc(fc, for_background))) in fuse_get_req()
134 if (!fc->connected) in fuse_get_req()
138 if (fc->conn_error) in fuse_get_req()
145 wake_up(&fc->blocked_waitq); in fuse_get_req()
149 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_get_req()
164 fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns); in fuse_get_req()
165 fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns); in fuse_get_req()
166 req->in.h.uid = from_kuid(fc->user_ns, fsuid); in fuse_get_req()
167 req->in.h.gid = from_kgid(fc->user_ns, fsgid); in fuse_get_req()
178 fuse_drop_waiting(fc); in fuse_get_req()
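The fuse_get_req() lines above outline the allocate-or-wait path. An abridged sketch of that skeleton, with error handling trimmed and the idmapped-mount plumbing (idmap, no_idmap, fsuid/fsgid) kept only as far as the matched lines show it; the exact signature and the surrounding flag setup are elided assumptions:

        atomic_inc(&fc->num_waiting);

        if (fuse_block_alloc(fc, for_background)) {
                err = -EINTR;
                if (wait_event_killable_exclusive(fc->blocked_waitq,
                                !fuse_block_alloc(fc, for_background)))
                        goto out;               /* killed while waiting */
        }
        /* Matches the barrier in fuse_set_initialized() */
        smp_rmb();

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;
        err = -ECONNREFUSED;
        if (fc->conn_error)
                goto out;

        req = fuse_request_alloc(fm, GFP_KERNEL);
        err = -ENOMEM;
        if (!req) {
                if (for_background)
                        wake_up(&fc->blocked_waitq);    /* let the next waiter retry */
                goto out;
        }

        /* Stamp caller identity, translated into the connection's namespaces */
        req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
        fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns);
        fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns);
        req->in.h.uid = from_kuid(fc->user_ns, fsuid);
        req->in.h.gid = from_kgid(fc->user_ns, fsgid);

        return req;

out:
        fuse_drop_waiting(fc);
        return ERR_PTR(err);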
184 struct fuse_conn *fc = req->fm->fc; in fuse_put_request() local
192 spin_lock(&fc->bg_lock); in fuse_put_request()
193 if (!fc->blocked) in fuse_put_request()
194 wake_up(&fc->blocked_waitq); in fuse_put_request()
195 spin_unlock(&fc->bg_lock); in fuse_put_request()
200 fuse_drop_waiting(fc); in fuse_put_request()
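fuse_put_request() drops the last reference: a background request that was allocated but never queued still has to give its allocation slot back, and FR_WAITING still has to be paired with fuse_drop_waiting(). A sketch under those assumptions (the FR_* flag tests are not part of the matched lines):

static void fuse_put_request(struct fuse_req *req)
{
        struct fuse_conn *fc = req->fm->fc;

        if (refcount_dec_and_test(&req->count)) {
                if (test_bit(FR_BACKGROUND, &req->flags)) {
                        /* allocated for background but never queued:
                         * release the allocation slot */
                        spin_lock(&fc->bg_lock);
                        if (!fc->blocked)
                                wake_up(&fc->blocked_waitq);
                        spin_unlock(&fc->bg_lock);
                }

                if (test_bit(FR_WAITING, &req->flags)) {
                        __clear_bit(FR_WAITING, &req->flags);
                        fuse_drop_waiting(fc);
                }

                fuse_request_free(req);
        }
}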
319 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, in fuse_queue_forget() argument
322 struct fuse_iqueue *fiq = &fc->iq; in fuse_queue_forget()
330 static void flush_bg_queue(struct fuse_conn *fc) in flush_bg_queue() argument
332 struct fuse_iqueue *fiq = &fc->iq; in flush_bg_queue()
334 while (fc->active_background < fc->max_background && in flush_bg_queue()
335 !list_empty(&fc->bg_queue)) { in flush_bg_queue()
338 req = list_first_entry(&fc->bg_queue, struct fuse_req, list); in flush_bg_queue()
340 fc->active_background++; in flush_bg_queue()
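flush_bg_queue() is where queued background requests become active: it moves entries from fc->bg_queue to the input queue while fewer than fc->max_background are in flight, and is called with fc->bg_lock held. A sketch; the final hand-off helper differs between kernel versions and is an assumption here:

static void flush_bg_queue(struct fuse_conn *fc)
{
        struct fuse_iqueue *fiq = &fc->iq;

        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                /* hand the request to the input queue; the exact helper
                 * (e.g. fuse_send_one(fiq, req)) varies by version */
                fuse_send_one(fiq, req);
        }
}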
356 struct fuse_conn *fc = fm->fc; in fuse_request_end() local
357 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_end()
376 spin_lock(&fc->bg_lock); in fuse_request_end()
378 if (fc->num_background == fc->max_background) { in fuse_request_end()
379 fc->blocked = 0; in fuse_request_end()
380 wake_up(&fc->blocked_waitq); in fuse_request_end()
381 } else if (!fc->blocked) { in fuse_request_end()
385 * fc->blocked with waiters with the wake_up() call in fuse_request_end()
388 if (waitqueue_active(&fc->blocked_waitq)) in fuse_request_end()
389 wake_up(&fc->blocked_waitq); in fuse_request_end()
392 fc->num_background--; in fuse_request_end()
393 fc->active_background--; in fuse_request_end()
394 flush_bg_queue(fc); in fuse_request_end()
395 spin_unlock(&fc->bg_lock); in fuse_request_end()
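In fuse_request_end(), the background branch undoes the accounting done at queue time: when exactly max_background requests were outstanding the connection is unblocked, otherwise a single allocation waiter is nudged, and the queue is flushed again so a pending request can take the freed slot. An abridged sketch of that branch (the FR_BACKGROUND test and the rest of the completion path are elided):

        if (test_bit(FR_BACKGROUND, &req->flags)) {
                spin_lock(&fc->bg_lock);
                clear_bit(FR_BACKGROUND, &req->flags);
                if (fc->num_background == fc->max_background) {
                        fc->blocked = 0;
                        wake_up(&fc->blocked_waitq);
                } else if (!fc->blocked) {
                        /*
                         * Wake up next waiter, if any.  It's okay to use
                         * waitqueue_active(), as we've already synced up
                         * fc->blocked with waiters with the wake_up() call
                         * above or with fc->bg_lock.
                         */
                        if (waitqueue_active(&fc->blocked_waitq))
                                wake_up(&fc->blocked_waitq);
                }

                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
                spin_unlock(&fc->bg_lock);
        }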
410 struct fuse_iqueue *fiq = &req->fm->fc->iq; in queue_interrupt()
423 struct fuse_conn *fc = req->fm->fc; in request_wait_answer() local
424 struct fuse_iqueue *fiq = &fc->iq; in request_wait_answer()
427 if (!fc->no_interrupt) { in request_wait_answer()
469 struct fuse_iqueue *fiq = &req->fm->fc->iq; in __fuse_request_send()
483 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) in fuse_adjust_compat() argument
485 if (fc->minor < 4 && args->opcode == FUSE_STATFS) in fuse_adjust_compat()
488 if (fc->minor < 9) { in fuse_adjust_compat()
504 if (fc->minor < 12) { in fuse_adjust_compat()
518 struct fuse_conn *fc = req->fm->fc; in fuse_force_creds() local
521 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); in fuse_force_creds()
522 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); in fuse_force_creds()
528 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_force_creds()
546 struct fuse_conn *fc = fm->fc; in __fuse_simple_request() local
551 atomic_inc(&fc->num_waiting); in __fuse_simple_request()
566 /* Needs to be done after fuse_get_req() so that fc->minor is valid */ in __fuse_simple_request()
567 fuse_adjust_compat(fc, args); in __fuse_simple_request()
586 struct fuse_conn *fc = fm->fc; in fuse_request_queue_background() local
592 atomic_inc(&fc->num_waiting); in fuse_request_queue_background()
595 spin_lock(&fc->bg_lock); in fuse_request_queue_background()
596 if (likely(fc->connected)) { in fuse_request_queue_background()
597 fc->num_background++; in fuse_request_queue_background()
598 if (fc->num_background == fc->max_background) in fuse_request_queue_background()
599 fc->blocked = 1; in fuse_request_queue_background()
600 list_add_tail(&req->list, &fc->bg_queue); in fuse_request_queue_background()
601 flush_bg_queue(fc); in fuse_request_queue_background()
604 spin_unlock(&fc->bg_lock); in fuse_request_queue_background()
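fuse_request_queue_background() is the producer side of the same accounting: under fc->bg_lock it only accepts the request while fc->connected, throttles new allocations once num_background reaches max_background, and immediately tries to flush. A sketch, assuming the usual bool return and FR_* flag bookkeeping (congestion handling elided):

static bool fuse_request_queue_background(struct fuse_req *req)
{
        struct fuse_mount *fm = req->fm;
        struct fuse_conn *fc = fm->fc;
        bool queued = false;

        WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
        if (!test_bit(FR_WAITING, &req->flags)) {
                __set_bit(FR_WAITING, &req->flags);
                atomic_inc(&fc->num_waiting);
        }
        __set_bit(FR_ISREPLY, &req->flags);
        spin_lock(&fc->bg_lock);
        if (likely(fc->connected)) {
                fc->num_background++;
                if (fc->num_background == fc->max_background)
                        fc->blocked = 1;
                list_add_tail(&req->list, &fc->bg_queue);
                flush_bg_queue(fc);
                queued = true;
        }
        spin_unlock(&fc->bg_lock);

        return queued;
}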
642 struct fuse_iqueue *fiq = &fm->fc->iq; in fuse_simple_notify_reply()
1231 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, in fuse_read_forget() argument
1236 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) in fuse_read_forget()
1255 struct fuse_conn *fc = fud->fc; in fuse_dev_do_read() local
1256 struct fuse_iqueue *fiq = &fc->iq; in fuse_dev_do_read()
1278 fc->max_write)) in fuse_dev_do_read()
1297 err = fc->aborted ? -ECONNABORTED : -ENODEV; in fuse_dev_do_read()
1309 return fuse_read_forget(fc, fiq, cs, nbytes); in fuse_dev_do_read()
1353 err = fc->aborted ? -ECONNABORTED : -ENODEV; in fuse_dev_do_read()
1468 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, in fuse_notify_poll() argument
1482 return fuse_notify_poll_wakeup(fc, &outarg); in fuse_notify_poll()
1489 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_inode() argument
1503 down_read(&fc->killsb); in fuse_notify_inval_inode()
1504 err = fuse_reverse_inval_inode(fc, outarg.ino, in fuse_notify_inval_inode()
1506 up_read(&fc->killsb); in fuse_notify_inval_inode()
1514 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_entry() argument
1550 down_read(&fc->killsb); in fuse_notify_inval_entry()
1551 err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags); in fuse_notify_inval_entry()
1552 up_read(&fc->killsb); in fuse_notify_inval_entry()
1562 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, in fuse_notify_delete() argument
1598 down_read(&fc->killsb); in fuse_notify_delete()
1599 err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0); in fuse_notify_delete()
1600 up_read(&fc->killsb); in fuse_notify_delete()
1610 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, in fuse_notify_store() argument
1638 down_read(&fc->killsb); in fuse_notify_store()
1641 inode = fuse_ilookup(fc, nodeid, NULL); in fuse_notify_store()
1689 up_read(&fc->killsb); in fuse_notify_store()
1721 struct fuse_conn *fc = fm->fc; in fuse_retrieve() local
1730 num = min(outarg->size, fc->max_write); in fuse_retrieve()
1737 num_pages = min(num_pages, fc->max_pages); in fuse_retrieve()
1790 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, in fuse_notify_retrieve() argument
1809 down_read(&fc->killsb); in fuse_notify_retrieve()
1813 inode = fuse_ilookup(fc, nodeid, &fm); in fuse_notify_retrieve()
1818 up_read(&fc->killsb); in fuse_notify_retrieve()
1840 static void fuse_resend(struct fuse_conn *fc) in fuse_resend() argument
1844 struct fuse_iqueue *fiq = &fc->iq; in fuse_resend()
1848 spin_lock(&fc->lock); in fuse_resend()
1849 if (!fc->connected) { in fuse_resend()
1850 spin_unlock(&fc->lock); in fuse_resend()
1854 list_for_each_entry(fud, &fc->devices, entry) { in fuse_resend()
1862 spin_unlock(&fc->lock); in fuse_resend()
1884 static int fuse_notify_resend(struct fuse_conn *fc) in fuse_notify_resend() argument
1886 fuse_resend(fc); in fuse_notify_resend()
1890 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, in fuse_notify() argument
1898 return fuse_notify_poll(fc, size, cs); in fuse_notify()
1901 return fuse_notify_inval_inode(fc, size, cs); in fuse_notify()
1904 return fuse_notify_inval_entry(fc, size, cs); in fuse_notify()
1907 return fuse_notify_store(fc, size, cs); in fuse_notify()
1910 return fuse_notify_retrieve(fc, size, cs); in fuse_notify()
1913 return fuse_notify_delete(fc, size, cs); in fuse_notify()
1916 return fuse_notify_resend(fc); in fuse_notify()
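fuse_notify() is a plain dispatcher over enum fuse_notify_code; each matched line above is one arm of its switch. A sketch of that dispatch (the page-moving setup at the top is elided; unknown codes discard the payload and fail with -EINVAL):

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
{
        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);
        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);
        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);
        case FUSE_NOTIFY_STORE:
                return fuse_notify_store(fc, size, cs);
        case FUSE_NOTIFY_RETRIEVE:
                return fuse_notify_retrieve(fc, size, cs);
        case FUSE_NOTIFY_DELETE:
                return fuse_notify_delete(fc, size, cs);
        case FUSE_NOTIFY_RESEND:
                return fuse_notify_resend(fc);
        default:
                fuse_copy_finish(cs);
                return -EINVAL;
        }
}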
1969 struct fuse_conn *fc = fud->fc; in fuse_dev_do_write() local
1991 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); in fuse_dev_do_write()
2019 fc->no_interrupt = 1; in fuse_dev_do_write()
2181 fiq = &fud->fc->iq; in fuse_dev_poll()
2207 static void end_polls(struct fuse_conn *fc) in end_polls() argument
2211 p = rb_first(&fc->polled_files); in end_polls()
2240 void fuse_abort_conn(struct fuse_conn *fc) in fuse_abort_conn() argument
2242 struct fuse_iqueue *fiq = &fc->iq; in fuse_abort_conn()
2244 spin_lock(&fc->lock); in fuse_abort_conn()
2245 if (fc->connected) { in fuse_abort_conn()
2251 /* Background queuing checks fc->connected under bg_lock */ in fuse_abort_conn()
2252 spin_lock(&fc->bg_lock); in fuse_abort_conn()
2253 fc->connected = 0; in fuse_abort_conn()
2254 spin_unlock(&fc->bg_lock); in fuse_abort_conn()
2256 fuse_set_initialized(fc); in fuse_abort_conn()
2257 list_for_each_entry(fud, &fc->devices, entry) { in fuse_abort_conn()
2278 spin_lock(&fc->bg_lock); in fuse_abort_conn()
2279 fc->blocked = 0; in fuse_abort_conn()
2280 fc->max_background = UINT_MAX; in fuse_abort_conn()
2281 flush_bg_queue(fc); in fuse_abort_conn()
2282 spin_unlock(&fc->bg_lock); in fuse_abort_conn()
2294 end_polls(fc); in fuse_abort_conn()
2295 wake_up_all(&fc->blocked_waitq); in fuse_abort_conn()
2296 spin_unlock(&fc->lock); in fuse_abort_conn()
2300 spin_unlock(&fc->lock); in fuse_abort_conn()
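The fuse_abort_conn() lines show the ordering that matters for the fc fields: connected is cleared under bg_lock (so background queuing cannot race past it), initialized is forced on so allocators stop sleeping, then the background queue is drained with the limit lifted and every waiter is woken. A heavily abridged sketch of that shape; the per-device pending/processing queue teardown between these steps is elided:

        spin_lock(&fc->lock);
        if (fc->connected) {
                /* Background queuing checks fc->connected under bg_lock */
                spin_lock(&fc->bg_lock);
                fc->connected = 0;
                spin_unlock(&fc->bg_lock);

                fuse_set_initialized(fc);       /* unblock allocators waiting for INIT */

                /* ... abort requests on each device queue (elided) ... */

                spin_lock(&fc->bg_lock);
                fc->blocked = 0;
                fc->max_background = UINT_MAX;  /* let flush_bg_queue() drain everything */
                flush_bg_queue(fc);
                spin_unlock(&fc->bg_lock);

                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
        } else {
                spin_unlock(&fc->lock);
        }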
2305 void fuse_wait_aborted(struct fuse_conn *fc) in fuse_wait_aborted() argument
2309 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); in fuse_wait_aborted()
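fuse_wait_aborted() is the consumer of the barrier mentioned in fuse_drop_waiting(): after an abort it sleeps until every num_waiting reference has been dropped. A sketch of that short helper, assuming the standard upstream form:

void fuse_wait_aborted(struct fuse_conn *fc)
{
        /* matches implicit memory barrier in fuse_drop_waiting() */
        smp_mb();
        wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}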
2317 struct fuse_conn *fc = fud->fc; in fuse_dev_release() local
2331 if (atomic_dec_and_test(&fc->dev_count)) { in fuse_dev_release()
2332 WARN_ON(fc->iq.fasync != NULL); in fuse_dev_release()
2333 fuse_abort_conn(fc); in fuse_dev_release()
2349 return fasync_helper(fd, file, on, &fud->fc->iq.fasync); in fuse_dev_fasync()
2352 static int fuse_device_clone(struct fuse_conn *fc, struct file *new) in fuse_device_clone() argument
2359 fud = fuse_dev_alloc_install(fc); in fuse_device_clone()
2364 atomic_inc(&fc->dev_count); in fuse_device_clone()
2393 res = fuse_device_clone(fud->fc, file); in fuse_dev_ioctl_clone()
2416 return fuse_backing_open(fud->fc, &map); in fuse_dev_ioctl_backing_open()
2433 return fuse_backing_close(fud->fc, backing_id); in fuse_dev_ioctl_backing_close()