/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "mlx5_ib.h"

/*
 * One pinned user page can back several doorbell records.  Each mapped
 * page is kept on the ucontext's db_page_list with a reference count so
 * the page is pinned only once and released when its last user is gone.
 */
struct mlx5_ib_user_db_page {
	struct list_head	list;
	struct ib_umem	       *umem;
	uintptr_t		user_virt;
	int			refcnt;
};

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
			struct mlx5_db *db)
{
	struct mlx5_ib_user_db_page *page;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

	/* Reuse an already pinned page covering this user address, if any. */
	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt    = 0;
	page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				      PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
	/* Doorbell DMA address: page base plus the offset within the page. */
	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}

void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
{
	mutex_lock(&context->db_page_mutex);

	/* Drop our reference; unpin and free the page when it was the last one. */
	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->db_page_mutex);
}
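
/*
 * Illustrative sketch (not part of this file, kept under #if 0): how a
 * verbs object creation/teardown path might use the helpers above.  The
 * function names, the user_db_addr parameter and the surrounding flow
 * are assumptions for illustration only, not the driver's actual code.
 */
#if 0
static int example_map_doorbell(struct mlx5_ib_ucontext *ucontext,
				uintptr_t user_db_addr,	/* e.g. taken from a create command */
				struct mlx5_db *db)
{
	int err;

	/* Pin the user page holding the doorbell record and fill db->dma. */
	err = mlx5_ib_db_map_user(ucontext, user_db_addr, db);
	if (err)
		return err;

	/* ... program db->dma into the hardware object context ... */

	return 0;
}

static void example_unmap_doorbell(struct mlx5_ib_ucontext *ucontext,
				   struct mlx5_db *db)
{
	/* Drops the page reference; the umem is released on the last unmap. */
	mlx5_ib_db_unmap_user(ucontext, db);
}
#endif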