/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
						   unsigned long addr,
						   unsigned long len,
						   unsigned long pgoff,
						   unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);

static unsigned ramfs_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
		NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

const struct file_operations ramfs_file_operations = {
	.mmap_capabilities	= ramfs_mmap_capabilities,
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read_iter		= generic_file_read_iter,
	.write_iter		= generic_file_write_iter,
	.fsync			= noop_fsync,
	.splice_read		= generic_file_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr		= ramfs_nommu_setattr,
	.getattr		= simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	unsigned long npages, xpages, loop;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		return -EFBIG;

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(gfp, order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
					    gfp);
		if (ret < 0)
			goto add_error;

		/* prevent the page from being discarded on memory pressure */
		SetPageDirty(page);
		SetPageUptodate(page);

		unlock_page(page);
		put_page(page);
	}

	return 0;

add_error:
	while (loop < npages)
		__free_page(pages + loop++);
	return ret;
}
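
/*
 * Worked example for ramfs_nommu_expand_for_mapping() (illustrative figures,
 * assuming 4KiB pages): a truncate from zero to newsize = 9000 gives
 * order = 2, so one contiguous 16KiB (4-page) block is allocated and split;
 * npages = 3, so the fourth page is handed back with __free_page(), the
 * remaining 12KiB are zeroed, and pages 0..2 are inserted into the page
 * cache while i_size stays at 9000.
 */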

/*****************************************************************************/
/*
 * handle a change in the size of a ramfs file
 * - a truncate up from size zero allocates the backing pages for shared mmap
 * - a shrink must not cut pages out from under any existing shared mappings
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = nommu_shrink_inode_mappings(inode, size, newsize);
		if (ret < 0)
			return ret;
	}

	truncate_setsize(inode, newsize);
	return 0;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = d_inode(dentry);
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = inode_change_ok(inode, ia);
	if (ret)
		return ret;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = inode->i_size;

		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	setattr_copy(inode, ia);
 out:
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file_inode(file);
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

	if (!(flags & MAP_SHARED))
		return addr;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -EINVAL;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	ret = -ENOMEM;
	pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out_free_pages; /* leave if some pages were missing */

	/* check the pages for physical adjacency */
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out_free_pages;

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

out_free_pages:
	ptr = pages;
	for (loop = nr; loop > 0; loop--)
		put_page(*ptr++);
out_free:
	kfree(pages);
out:
	return ret;
}
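
/*
 * Note on the adjacency check above: on the flat memory model typically used
 * by no-MMU targets, consecutive struct page entries describe consecutive
 * physical page frames, so checking that pages[n] == pages[0] + n verifies
 * that the cached pages are physically contiguous.  Only then can the shared
 * mapping be satisfied by returning the kernel address of the first page.
 */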

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
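
/*
 * Illustrative userspace usage (a sketch; the path below is hypothetical and
 * 4KiB pages are assumed):
 *
 *	int fd = open("/mnt/ramfs/seg", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 65536);
 *	void *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The truncate from size zero reaches ramfs_nommu_expand_for_mapping() via
 * ramfs_nommu_setattr() and allocates 16 physically contiguous pages; the
 * MAP_SHARED mmap() is then placed directly over those pages through
 * ramfs_nommu_get_unmapped_area(), so the region is shared rather than
 * copied.
 */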