/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
						   unsigned long addr,
						   unsigned long len,
						   unsigned long pgoff,
						   unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);

static unsigned ramfs_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
		NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

const struct file_operations ramfs_file_operations = {
	.mmap_capabilities = ramfs_mmap_capabilities,
	.mmap = ramfs_nommu_mmap,
	.get_unmapped_area = ramfs_nommu_get_unmapped_area,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.fsync = noop_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr = ramfs_nommu_setattr,
	.getattr = simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	unsigned long npages, xpages, loop;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		return -EFBIG;

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(gfp, order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
					    gfp);
		if (ret < 0)
			goto add_error;

		/* prevent the page from being discarded on memory
		 * pressure */
		SetPageDirty(page);
		SetPageUptodate(page);

		unlock_page(page);
		put_page(page);
	}

	return 0;

add_error:
	while (loop < npages)
		__free_page(pages + loop++);
	return ret;
}
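
/*
 * Illustrative userspace sequence (a sketch only, not part of this file's
 * API; the path name is hypothetical): the expansion above is reached by
 * truncating an empty ramfs file and then mapping it shared, e.g.
 *
 *	fd = open("/mnt/ramfs/seg", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 65536);	// size 0 -> 64KiB: contiguous pages allocated
 *	p = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * so that the shared mapping can later be placed directly over those pages.
 */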

/*****************************************************************************/
/*
 * resize an inode: a truncate up from size 0 is taken as preparation for a
 * shared mmap, anything else is handled as an ordinary truncate
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = nommu_shrink_inode_mappings(inode, size, newsize);
		if (ret < 0)
			return ret;
	}

	truncate_setsize(inode, newsize);
	return 0;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = d_inode(dentry);
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = setattr_prepare(dentry, ia);
	if (ret)
		return ret;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = inode->i_size;

		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	setattr_copy(inode, ia);
out:
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
						   unsigned long addr,
						   unsigned long len,
						   unsigned long pgoff,
						   unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file_inode(file);
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -ENOSYS;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out_free_pages; /* leave if some pages were missing */

	/* check the pages for physical adjacency */
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out_free_pages;

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

out_free_pages:
	ptr = pages;
	for (loop = nr; loop > 0; loop--)
		put_page(*ptr++);
out_free:
	kfree(pages);
out:
	return ret;
}
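
/*
 * Note on the adjacency check above (explanatory, assuming the flat memory
 * model usual on nommu targets): struct page entries for consecutive page
 * frames are themselves laid out consecutively, so comparing successive
 * struct page pointers (*ptr++ != page++) stands in for checking that the
 * underlying frames are physically contiguous, which a direct nommu mapping
 * requires.
 */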

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
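
/*
 * Behavioural note (not enforced in this file): private mappings are refused
 * above with -ENOSYS; because ramfs_mmap_capabilities() also advertises
 * NOMMU_MAP_COPY, the generic nommu mmap code is then expected to fall back
 * to allocating a private copy of the file contents instead.
 */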