/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * Return: ib_mr pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

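	/* Build a CREATE_MR command flagged as a DMA MR; no page directory is needed. */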
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @dmah: DMA handle (not supported by this device)
 * @udata: user data
 *
 * Return: ib_mr pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_dmah *dmah,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret, npages;

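	/* Optional DMA handles are not supported by the device. */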
	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

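	/* Pin the userspace region and set up its DMA mapping. */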
	umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

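	/* The region is split into PAGE_SIZE DMA blocks; the page directory
	 * can hold at most PVRDMA_PAGE_DIR_MAX_PAGES of them.
	 */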
	npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

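	/*
	 * Build the page directory the device uses to translate this MR,
	 * then populate it from the pinned umem.
	 */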
	ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

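	/*
	 * Register with the device: one CREATE_MR command carrying the DMA
	 * address of the page directory.
	 */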
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of scatter/gather entries (pages) to map
 *
 * Return: ib_mr pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

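	/* Page-address array filled by pvrdma_set_page() from pvrdma_map_mr_sg(). */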
	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

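	/*
	 * Create the region as a fast-register MR (FRMR); its pages are
	 * supplied later through pvrdma_map_mr_sg().
	 */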
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}

/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 * @udata: pointer to user data
 *
 * Return: always 0; a failed destroy command is only logged, since the
 * local resources are released regardless.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

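	/* Ask the device to destroy the MR, then tear down the local state. */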
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}

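/* ib_sg_to_pages() callback: record one page address in the MR's page list. */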
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

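/**
 * pvrdma_map_mr_sg - map a scatter/gather list to a fast-register MR
 * @ibmr: memory region
 * @sg: scatter/gather list to map
 * @sg_nents: number of entries in @sg
 * @sg_offset: offset into the first @sg entry, updated on return
 *
 * Return: number of sg entries mapped on success, otherwise a negative errno.
 */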
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}
333