/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>

static const struct file_operations fuse_direct_io_file_operations;

/* Send a FUSE_OPEN or FUSE_OPENDIR request and store the reply in *outargp */
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(void)
{
	struct fuse_file *ff;
	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			ff = NULL;
		}
	}
	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages(inode->i_mapping);
	ff->fh = outarg->fh;
	file->private_data = ff;
}

int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	/* If opening the root node, no lookup has been performed on
	   it, so the attributes must be refreshed */
	if (get_node_id(inode) == FUSE_ROOT_ID) {
		err = fuse_do_getattr(inode);
		if (err)
			return err;
	}

	ff = fuse_file_alloc();
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}

/* Fill in a RELEASE/RELEASEDIR request using the file's reserved request */
struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
				   int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release_in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
	kfree(ff);

	return req;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req;

		req = fuse_release_fill(ff, get_node_id(inode), file->f_flags,
					isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->vfsmount = mntget(file->f_vfsmnt);
		req->dentry = dget(file->f_dentry);
		request_send_background(fc, req);
	}

	/* Return value is ignored by VFS */
	return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
static u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_read_in *inarg = &req->misc.read_in;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	request_send(fc, req);
	return req->out.args[0].size;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		SetPageUptodate(page);
	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	fuse_put_request(fc, req);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (fc->async_read) {
		get_file(file);
		req->file = file;
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}

struct fuse_readpages_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

/*
 * Add a page to the current read request.  If the request is full or the
 * page is not contiguous with the previous one, send it off first and
 * start a new request.
 */
static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_readpages_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_readpages_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto clean_pages_up;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto clean_pages_up;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
	return err;

clean_pages_up:
	put_pages_list(pages);
	return err;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_write_in inarg;
	struct fuse_write_out outarg;

	memset(&inarg, 0, sizeof(struct fuse_write_in));
	inarg.fh = ff->fh;
	inarg.offset = pos;
	inarg.size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = &inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	return outarg.size;
}

static int fuse_prepare_write(struct file *file, struct page *page,
			      unsigned offset, unsigned to)
{
	/* No op */
	return 0;
}

static int fuse_commit_write(struct file *file, struct page *page,
			     unsigned offset, unsigned to)
{
	int err;
	size_t nres;
	unsigned count = to - offset;
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(page) + offset;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && nres != count)
		err = -EIO;
	if (!err) {
		pos += count;
		if (pos > i_size_read(inode))
			i_size_write(inode, pos);

		if (offset == 0 && to == PAGE_CACHE_SIZE) {
			clear_page_dirty(page);
			SetPageUptodate(page);
		}
	}
	fuse_invalidate_attr(inode);
	return err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

/* Pin the pages of the user buffer into req->pages for direct I/O */
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}

/*
 * Do a read or write directly against the user buffer, one request's
 * worth of pages at a time.
 */
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, nbytes, !write);
		if (err) {
			res = err;
			break;
		}
		nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
		nbytes = min(count, nbytes);
		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes);
		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write && pos > i_size_read(inode))
			i_size_write(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	ssize_t res;
	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED)) {
		if ((vma->vm_flags & VM_WRITE))
			return -ENODEV;
		else
			vma->vm_flags &= ~VM_MAYWRITE;
	}
	return generic_file_mmap(file, vma);
}

static int fuse_set_page_dirty(struct page *page)
{
	printk("fuse_set_page_dirty: should not happen\n");
	dump_stack();
	return 0;
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid);
	request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_GETLK) {
		if (fc->no_lock) {
			if (!posix_test_lock(file, fl, fl))
				fl->fl_type = F_UNLCK;
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file_wait(file, fl);
		else
			err = fuse_setlk(file, fl);
	}
	return err;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.sendfile	= generic_file_sendfile,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	/* no mmap and sendfile */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.prepare_write	= fuse_prepare_write,
	.commit_write	= fuse_commit_write,
	.readpages	= fuse_readpages,
	.set_page_dirty	= fuse_set_page_dirty,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}