/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"


static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->npages,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {

		page = sg_page(sg);
		if (!PageDirty(page) && umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If the access flags indicate ODP memory, pinning is avoided; instead,
 * the mm is stored for future page fault handling in conjunction with
 * MMU notifiers.
 *
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	unsigned long dma_attrs = 0;
	struct scatterlist *sg, *sg_list_start;
	unsigned int gup_flags = FOLL_WRITE;

	if (dmasync)
		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = ib_access_writable(access);

	if (access & IB_ACCESS_ON_DEMAND) {
		ret = ib_umem_odp_get(context, umem, access);
		if (ret)
			goto umem_kfree;
		return umem;
	}

	umem->odp_data = NULL;

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm += npages;
	if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
		up_write(&current->mm->mmap_sem);
		ret = -ENOMEM;
		goto vma;
	}
	up_write(&current->mm->mmap_sem);

	cur_base = addr & PAGE_MASK;

	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto vma;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto vma;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	sg_list_start = umem->sg_head.sgl;

	down_read(&current->mm->mmap_sem);
	while (npages) {
		ret = get_user_pages_longterm(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);
		if (ret < 0) {
			up_read(&current->mm->mmap_sem);
			goto umem_release;
		}

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* preparing for next loop */
		sg_list_start = sg;
	}
	up_read(&current->mm->mmap_sem);

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 dma_attrs);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(context->device, umem, 0);
vma:
	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm -= ib_umem_num_pages(umem);
	up_write(&current->mm->mmap_sem);
out:
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret)
		kfree(umem);
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
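
/*
 * Usage sketch (illustrative, compiled out): a verbs driver typically
 * calls ib_umem_get() from its reg_user_mr hook and keeps the returned
 * umem alive for the lifetime of the MR. The example_mr and
 * example_reg_user_mr names below are hypothetical; only the
 * ib_umem_get() call itself reflects the API implemented above.
 */
#if 0
struct example_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
};

static struct ib_mr *example_reg_user_mr(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int access, struct ib_udata *udata)
{
	struct example_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Pin and DMA map the user buffer; dmasync is not needed here. */
	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access, 0);
	if (IS_ERR(mr->umem)) {
		int err = PTR_ERR(mr->umem);

		kfree(mr);
		return ERR_PTR(err);
	}

	/* ... program the HCA with the umem's DMA-mapped pages ... */

	return &mr->ibmr;
}
#endif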

static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	if (umem->odp_data) {
		ib_umem_odp_release(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held. This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method. If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem. In that case
	 * we defer the pinned_vm accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
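
/*
 * Usage sketch (illustrative, compiled out): the counterpart to the
 * registration example above. On dereg_mr the driver releases the umem,
 * which unpins the pages and returns the pinned_vm accounting. The
 * example_mr and example_dereg_mr names are hypothetical.
 */
#if 0
static int example_dereg_mr(struct ib_mr *ibmr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	/* ... tear down the HCA's mapping of the region first ... */

	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}
#endif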

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->odp_data)
		return ib_umem_num_pages(umem);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> umem->page_shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zu umem length: %zu end: %zu\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
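
/*
 * Usage sketch (illustrative, compiled out): ib_umem_copy_from() lets a
 * driver snapshot part of a registered region without mapping it itself,
 * e.g. reading a header that userspace placed at the start of the MR.
 * The example_read_header name is hypothetical.
 */
#if 0
static int example_read_header(struct ib_umem *umem, void *hdr,
			       size_t hdr_len)
{
	/* Returns -EINVAL if the requested range exceeds the umem. */
	return ib_umem_copy_from(hdr, umem, 0, hdr_len);
}
#endif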