1 /*-
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28
29 #include <linux/kref.h>
30 #include <linux/slab.h>
31 #include <rdma/ib_umem.h>
32
33 #include <dev/mlx5/mlx5_ib/mlx5_ib.h>
34
/*
 * One pinned user page that backs one or more doorbell records.
 * Pages are shared: multiple mlx5_db mappings that fall within the
 * same PAGE_MASK-aligned page reuse a single entry via refcnt.
 */
struct mlx5_ib_user_db_page {
	struct list_head	list;		/* link in context->db_page_list */
	struct ib_umem	       *umem;		/* pinned/DMA-mapped user memory */
	unsigned long		user_virt;	/* page-aligned user virtual address */
	int			refcnt;		/* number of db mappings on this page */
};
41
mlx5_ib_db_map_user(struct mlx5_ib_ucontext * context,unsigned long virt,struct mlx5_db * db)42 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
43 struct mlx5_db *db)
44 {
45 struct mlx5_ib_user_db_page *page;
46 int err = 0;
47
48 mutex_lock(&context->db_page_mutex);
49
50 list_for_each_entry(page, &context->db_page_list, list)
51 if (page->user_virt == (virt & PAGE_MASK))
52 goto found;
53
54 page = kmalloc(sizeof(*page), GFP_KERNEL);
55 if (!page) {
56 err = -ENOMEM;
57 goto out;
58 }
59
60 page->user_virt = (virt & PAGE_MASK);
61 page->refcnt = 0;
62 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
63 PAGE_SIZE, 0, 0);
64 if (IS_ERR(page->umem)) {
65 err = PTR_ERR(page->umem);
66 kfree(page);
67 goto out;
68 }
69
70 list_add(&page->list, &context->db_page_list);
71
72 found:
73 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
74 db->u.user_page = page;
75 ++page->refcnt;
76
77 out:
78 mutex_unlock(&context->db_page_mutex);
79
80 return err;
81 }
82
mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext * context,struct mlx5_db * db)83 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
84 {
85 mutex_lock(&context->db_page_mutex);
86
87 if (!--db->u.user_page->refcnt) {
88 list_del(&db->u.user_page->list);
89 ib_umem_release(db->u.user_page->umem);
90 kfree(db->u.user_page);
91 }
92
93 mutex_unlock(&context->db_page_mutex);
94 }
95