1 /*
2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #include <linux/vmalloc.h>
35 #include <linux/count_zeros.h>
36 #include <rdma/ib_umem.h>
37 #include <linux/math.h>
38 #include "hns_roce_device.h"
39 #include "hns_roce_cmd.h"
40 #include "hns_roce_hem.h"
41
static u32 hw_index_to_key(int ind)
{
	return ((u32)ind >> 24) | ((u32)ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
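
/*
 * Note: the two helpers above are inverse byte rotations of a 32-bit value,
 * so an MR key is simply the MTPT index rotated left by 8 bits. For example,
 * hardware index 0x5 becomes key 0x00000500, and key_to_hw_index(0x00000500)
 * recovers 0x5, which is how free_mr_key() gets the IDA id back from mr->key.
 */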
51
static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
53 {
54 struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
55 struct ib_device *ibdev = &hr_dev->ib_dev;
56 int err;
57 int id;
58
59 /* Allocate a key for mr from mr_table */
60 id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
61 GFP_KERNEL);
62 if (id < 0) {
63 ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
64 return -ENOMEM;
65 }
66
67 mr->key = hw_index_to_key(id); /* MR key */
68
69 err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
70 (unsigned long)id);
71 if (err) {
72 ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
73 goto err_free_bitmap;
74 }
75
76 return 0;
77 err_free_bitmap:
78 ida_free(&mtpt_ida->ida, id);
79 return err;
80 }
81
static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
83 {
84 unsigned long obj = key_to_hw_index(mr->key);
85
86 hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
87 ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
88 }
89
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
91 struct ib_udata *udata, u64 start)
92 {
93 struct ib_device *ibdev = &hr_dev->ib_dev;
94 bool is_fast = mr->type == MR_TYPE_FRMR;
95 struct hns_roce_buf_attr buf_attr = {};
96 int err;
97
98 mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
99 buf_attr.page_shift = is_fast ? PAGE_SHIFT :
100 hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
101 buf_attr.region[0].size = mr->size;
102 buf_attr.region[0].hopnum = mr->pbl_hop_num;
103 buf_attr.region_count = 1;
104 buf_attr.user_access = mr->access;
	/* A fast MR's buffer is allocated before mapping, not at creation time. */
106 buf_attr.mtt_only = is_fast;
107 buf_attr.iova = mr->iova;
	/* Page size and hop num are fixed for a fast MR. */
109 buf_attr.adaptive = !is_fast;
110 buf_attr.type = MTR_PBL;
111
112 err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
113 hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
114 udata, start);
115 if (err) {
116 ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
117 return err;
118 }
119
120 mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
121 mr->pbl_hop_num = buf_attr.region[0].hopnum;
122
123 return err;
124 }
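
/*
 * Note: for a fast-reg MR only the MTT structure is created above (mtt_only),
 * with a fixed hopnum of 1 and PAGE_SHIFT-sized pages; the page list itself
 * is filled in later by hns_roce_map_mr_sg(). For a normal MR the adaptive
 * path lets the MTR choose the best page size and hop number for the umem.
 */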
125
static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
127 {
128 hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
129 }
130
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
132 {
133 struct ib_device *ibdev = &hr_dev->ib_dev;
134 int ret;
135
136 if (mr->enabled) {
137 ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
138 key_to_hw_index(mr->key) &
139 (hr_dev->caps.num_mtpts - 1));
140 if (ret)
141 ibdev_warn_ratelimited(ibdev, "failed to destroy mpt, ret = %d.\n",
142 ret);
143 }
144
145 free_mr_pbl(hr_dev, mr);
146 free_mr_key(hr_dev, mr);
147 }
148
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
150 struct hns_roce_mr *mr)
151 {
152 unsigned long mtpt_idx = key_to_hw_index(mr->key);
153 struct hns_roce_cmd_mailbox *mailbox;
154 struct device *dev = hr_dev->dev;
155 int ret;
156
157 /* Allocate mailbox memory */
158 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
159 if (IS_ERR(mailbox))
160 return PTR_ERR(mailbox);
161
162 if (mr->type != MR_TYPE_FRMR)
163 ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
164 else
165 ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
166 if (ret) {
167 dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
168 goto err_page;
169 }
170
171 ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
172 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
173 if (ret) {
174 dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
175 goto err_page;
176 }
177
178 mr->enabled = 1;
179
180 err_page:
181 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
182
183 return ret;
184 }
185
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
187 {
188 struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
189
190 ida_init(&mtpt_ida->ida);
191 mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
192 mtpt_ida->min = hr_dev->caps.reserved_mrws;
193 }
194
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
196 {
197 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
198 struct hns_roce_mr *mr;
199 int ret;
200
201 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
202 if (!mr)
203 return ERR_PTR(-ENOMEM);
204
205 mr->type = MR_TYPE_DMA;
206 mr->pd = to_hr_pd(pd)->pdn;
207 mr->access = acc;
208
209 /* Allocate memory region key */
210 hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
211 ret = alloc_mr_key(hr_dev, mr);
212 if (ret)
213 goto err_free;
214
215 ret = hns_roce_mr_enable(hr_dev, mr);
216 if (ret)
217 goto err_mr;
218
219 mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
220
221 return &mr->ibmr;
222 err_mr:
223 free_mr_key(hr_dev, mr);
224
225 err_free:
226 kfree(mr);
227 return ERR_PTR(ret);
228 }
229
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
231 u64 virt_addr, int access_flags,
232 struct ib_udata *udata)
233 {
234 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
235 struct hns_roce_mr *mr;
236 int ret;
237
238 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
239 if (!mr) {
240 ret = -ENOMEM;
241 goto err_out;
242 }
243
244 mr->iova = virt_addr;
245 mr->size = length;
246 mr->pd = to_hr_pd(pd)->pdn;
247 mr->access = access_flags;
248 mr->type = MR_TYPE_MR;
249
250 ret = alloc_mr_key(hr_dev, mr);
251 if (ret)
252 goto err_alloc_mr;
253
254 ret = alloc_mr_pbl(hr_dev, mr, udata, start);
255 if (ret)
256 goto err_alloc_key;
257
258 ret = hns_roce_mr_enable(hr_dev, mr);
259 if (ret)
260 goto err_alloc_pbl;
261
262 mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
263
264 return &mr->ibmr;
265
266 err_alloc_pbl:
267 free_mr_pbl(hr_dev, mr);
268 err_alloc_key:
269 free_mr_key(hr_dev, mr);
270 err_alloc_mr:
271 kfree(mr);
272 err_out:
273 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REG_ERR_CNT]);
274
275 return ERR_PTR(ret);
276 }
277
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
279 u64 length, u64 virt_addr,
280 int mr_access_flags, struct ib_pd *pd,
281 struct ib_udata *udata)
282 {
283 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
284 struct ib_device *ib_dev = &hr_dev->ib_dev;
285 struct hns_roce_mr *mr = to_hr_mr(ibmr);
286 struct hns_roce_cmd_mailbox *mailbox;
287 unsigned long mtpt_idx;
288 int ret;
289
290 if (!mr->enabled) {
291 ret = -EINVAL;
292 goto err_out;
293 }
294
295 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
296 ret = PTR_ERR_OR_ZERO(mailbox);
297 if (ret)
298 goto err_out;
299
300 mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
301
302 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
303 mtpt_idx);
304 if (ret)
305 goto free_cmd_mbox;
306
307 ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
308 mtpt_idx);
309 if (ret)
310 ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
311
312 mr->enabled = 0;
313 mr->iova = virt_addr;
314 mr->size = length;
315
316 if (flags & IB_MR_REREG_PD)
317 mr->pd = to_hr_pd(pd)->pdn;
318
319 if (flags & IB_MR_REREG_ACCESS)
320 mr->access = mr_access_flags;
321
322 if (flags & IB_MR_REREG_TRANS) {
323 free_mr_pbl(hr_dev, mr);
324 ret = alloc_mr_pbl(hr_dev, mr, udata, start);
325 if (ret) {
326 ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
327 ret);
328 goto free_cmd_mbox;
329 }
330 }
331
332 ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
333 if (ret) {
334 ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
335 goto free_cmd_mbox;
336 }
337
338 ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
339 mtpt_idx);
340 if (ret) {
341 ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
342 goto free_cmd_mbox;
343 }
344
345 mr->enabled = 1;
346
347 free_cmd_mbox:
348 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
349
350 err_out:
351 if (ret) {
352 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REREG_ERR_CNT]);
353 return ERR_PTR(ret);
354 }
355
356 return NULL;
357 }
358
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
360 {
361 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
362 struct hns_roce_mr *mr = to_hr_mr(ibmr);
363
364 if (hr_dev->hw->dereg_mr)
365 hr_dev->hw->dereg_mr(hr_dev);
366
367 hns_roce_mr_free(hr_dev, mr);
368 kfree(mr);
369
370 return 0;
371 }
372
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
374 u32 max_num_sg)
375 {
376 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
377 struct device *dev = hr_dev->dev;
378 struct hns_roce_mr *mr;
379 int ret;
380
381 if (mr_type != IB_MR_TYPE_MEM_REG)
382 return ERR_PTR(-EINVAL);
383
384 if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
385 dev_err(dev, "max_num_sg larger than %d\n",
386 HNS_ROCE_FRMR_MAX_PA);
387 return ERR_PTR(-EINVAL);
388 }
389
390 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
391 if (!mr)
392 return ERR_PTR(-ENOMEM);
393
394 mr->type = MR_TYPE_FRMR;
395 mr->pd = to_hr_pd(pd)->pdn;
396 mr->size = max_num_sg * (1 << PAGE_SHIFT);
397
398 /* Allocate memory region key */
399 ret = alloc_mr_key(hr_dev, mr);
400 if (ret)
401 goto err_free;
402
403 ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
404 if (ret)
405 goto err_key;
406
407 ret = hns_roce_mr_enable(hr_dev, mr);
408 if (ret)
409 goto err_pbl;
410
411 mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
412 mr->ibmr.length = mr->size;
413
414 return &mr->ibmr;
415
416 err_pbl:
417 free_mr_pbl(hr_dev, mr);
418 err_key:
419 free_mr_key(hr_dev, mr);
420 err_free:
421 kfree(mr);
422 return ERR_PTR(ret);
423 }
424
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
426 {
427 struct hns_roce_mr *mr = to_hr_mr(ibmr);
428
429 if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
430 mr->page_list[mr->npages++] = addr;
431 return 0;
432 }
433
434 return -ENOBUFS;
435 }
436
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
438 unsigned int *sg_offset_p)
439 {
440 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
441 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
442 struct ib_device *ibdev = &hr_dev->ib_dev;
443 struct hns_roce_mr *mr = to_hr_mr(ibmr);
444 struct hns_roce_mtr *mtr = &mr->pbl_mtr;
445 int ret, sg_num = 0;
446
447 if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
448 ibmr->page_size < HNS_HW_PAGE_SIZE ||
449 ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
450 return sg_num;
451
452 mr->npages = 0;
453 mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
454 sizeof(dma_addr_t), GFP_KERNEL);
455 if (!mr->page_list)
456 return sg_num;
457
458 sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page);
459 if (sg_num < 1) {
460 ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
461 mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
462 goto err_page_list;
463 }
464
465 mtr->hem_cfg.region[0].offset = 0;
466 mtr->hem_cfg.region[0].count = mr->npages;
467 mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
468 mtr->hem_cfg.region_count = 1;
469 ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
470 if (ret) {
471 ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
472 sg_num = 0;
473 } else {
474 mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
475 }
476
477 err_page_list:
478 kvfree(mr->page_list);
479 mr->page_list = NULL;
480
481 return sg_num;
482 }
483
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
485 struct hns_roce_mw *mw)
486 {
487 struct device *dev = hr_dev->dev;
488 int ret;
489
490 if (mw->enabled) {
491 ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
492 key_to_hw_index(mw->rkey) &
493 (hr_dev->caps.num_mtpts - 1));
494 if (ret)
495 dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
496
497 hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
498 key_to_hw_index(mw->rkey));
499 }
500
501 ida_free(&hr_dev->mr_table.mtpt_ida.ida,
502 (int)key_to_hw_index(mw->rkey));
503 }
504
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
506 struct hns_roce_mw *mw)
507 {
508 struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
509 struct hns_roce_cmd_mailbox *mailbox;
510 struct device *dev = hr_dev->dev;
511 unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
512 int ret;
513
514 /* prepare HEM entry memory */
515 ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
516 if (ret)
517 return ret;
518
519 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
520 if (IS_ERR(mailbox)) {
521 ret = PTR_ERR(mailbox);
522 goto err_table;
523 }
524
525 ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
526 if (ret) {
527 dev_err(dev, "MW write mtpt fail!\n");
528 goto err_page;
529 }
530
531 ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
532 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
533 if (ret) {
534 dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
535 goto err_page;
536 }
537
538 mw->enabled = 1;
539
540 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
541
542 return 0;
543
544 err_page:
545 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
546
547 err_table:
548 hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
549
550 return ret;
551 }
552
int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
554 {
555 struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
556 struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
557 struct ib_device *ibdev = &hr_dev->ib_dev;
558 struct hns_roce_mw *mw = to_hr_mw(ibmw);
559 int ret;
560 int id;
561
562 /* Allocate a key for mw from mr_table */
563 id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
564 GFP_KERNEL);
565 if (id < 0) {
566 ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
567 return -ENOMEM;
568 }
569
570 mw->rkey = hw_index_to_key(id);
571
572 ibmw->rkey = mw->rkey;
573 mw->pdn = to_hr_pd(ibmw->pd)->pdn;
574 mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
575 mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
576 mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
577
578 ret = hns_roce_mw_enable(hr_dev, mw);
579 if (ret)
580 goto err_mw;
581
582 return 0;
583
584 err_mw:
585 hns_roce_mw_free(hr_dev, mw);
586 return ret;
587 }
588
int hns_roce_dealloc_mw(struct ib_mw *ibmw)
590 {
591 struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
592 struct hns_roce_mw *mw = to_hr_mw(ibmw);
593
594 hns_roce_mw_free(hr_dev, mw);
595 return 0;
596 }
597
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
599 struct hns_roce_buf_region *region, dma_addr_t *pages,
600 int max_count)
601 {
602 int count, npage;
603 int offset, end;
604 __le64 *mtts;
605 u64 addr;
606 int i;
607
608 offset = region->offset;
609 end = offset + region->count;
610 npage = 0;
611 while (offset < end && npage < max_count) {
612 count = 0;
613 mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
614 offset, &count);
615 if (!mtts)
616 return -ENOBUFS;
617
618 for (i = 0; i < count && npage < max_count; i++) {
619 addr = pages[npage];
620
621 mtts[i] = cpu_to_le64(addr);
622 npage++;
623 }
624 offset += count;
625 }
626
627 return npage;
628 }
629
static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
631 {
632 int i;
633
634 for (i = 0; i < attr->region_count; i++)
635 if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
636 attr->region[i].hopnum > 0)
637 return true;
638
	/* Because the mtr has only one root base address, a hopnum of 0 means
	 * the root base address equals the first buffer address, so all
	 * allocated memory must lie in one continuous range accessed in
	 * direct mode.
	 */
643 return false;
644 }
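
/*
 * For example, a single-region buffer whose hopnum is 0 has no MTT at all:
 * the root BA written to hardware is the DMA address of the buffer itself,
 * which is why mtr_alloc_bufs() allocates such buffers with
 * HNS_ROCE_BUF_DIRECT and hns_roce_mtr_map() only records pages[0].
 */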
645
static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
647 {
648 size_t size = 0;
649 int i;
650
651 for (i = 0; i < attr->region_count; i++)
652 size += attr->region[i].size;
653
654 return size;
655 }
656
/*
 * Check whether the given pages lie in a continuous address space.
 * Returns 0 on success, or the index of the first non-contiguous page.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
662 unsigned int page_shift)
663 {
664 size_t page_size = 1 << page_shift;
665 int i;
666
667 for (i = 1; i < page_count; i++)
668 if (pages[i] - pages[i - 1] != page_size)
669 return i;
670
671 return 0;
672 }
673
static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
675 {
676 /* release user buffers */
677 if (mtr->umem) {
678 ib_umem_release(mtr->umem);
679 mtr->umem = NULL;
680 }
681
682 /* release kernel buffers */
683 if (mtr->kmem) {
684 hns_roce_buf_free(hr_dev, mtr->kmem);
685 mtr->kmem = NULL;
686 }
687 }
688
static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
690 struct hns_roce_buf_attr *buf_attr,
691 struct ib_udata *udata, unsigned long user_addr)
692 {
693 struct ib_device *ibdev = &hr_dev->ib_dev;
694 size_t total_size;
695
696 total_size = mtr_bufs_size(buf_attr);
697
698 if (udata) {
699 mtr->kmem = NULL;
700 mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
701 buf_attr->user_access);
702 if (IS_ERR(mtr->umem)) {
703 ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
704 PTR_ERR(mtr->umem));
705 return -ENOMEM;
706 }
707 } else {
708 mtr->umem = NULL;
709 mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
710 buf_attr->page_shift,
711 !mtr_has_mtt(buf_attr) ?
712 HNS_ROCE_BUF_DIRECT : 0);
713 if (IS_ERR(mtr->kmem)) {
714 ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
715 PTR_ERR(mtr->kmem));
716 return PTR_ERR(mtr->kmem);
717 }
718 }
719
720 return 0;
721 }
722
static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
724 {
725 struct hns_roce_buf_region *region;
726 int page_cnt = 0;
727 int i;
728
729 for (i = 0; i < mtr->hem_cfg.region_count; i++) {
730 region = &mtr->hem_cfg.region[i];
731 page_cnt += region->count;
732 }
733
734 return page_cnt;
735 }
736
static bool need_split_huge_page(struct hns_roce_mtr *mtr)
738 {
	/* When the HEM buffer uses 0-level addressing, the page size equals
	 * the whole buffer size. If the current MTR has multiple regions, the
	 * buffer is split into small pages (4K, as required by the hns
	 * ROCEE), and those pages are shared among the regions.
	 */
744 return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
745 }
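
/*
 * For instance, a direct-mode buffer made up of several regions (such as the
 * SQ and RQ portions of a QP buffer) is still addressed through one root BA,
 * but its region offsets and counts are expressed in 4K units so the rest of
 * the MTR code can treat the contiguous chunk as a run of 4K pages.
 */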
746
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
748 {
749 struct ib_device *ibdev = &hr_dev->ib_dev;
750 int page_count = cal_mtr_pg_cnt(mtr);
751 unsigned int page_shift;
752 dma_addr_t *pages;
753 int npage;
754 int ret;
755
756 page_shift = need_split_huge_page(mtr) ? HNS_HW_PAGE_SHIFT :
757 mtr->hem_cfg.buf_pg_shift;
758 /* alloc a tmp array to store buffer's dma address */
759 pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
760 if (!pages)
761 return -ENOMEM;
762
763 if (mtr->umem)
764 npage = hns_roce_get_umem_bufs(pages, page_count,
765 mtr->umem, page_shift);
766 else
767 npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
768 mtr->kmem, page_shift);
769
770 if (npage != page_count) {
771 ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
772 page_count);
773 ret = -ENOBUFS;
774 goto err_alloc_list;
775 }
776
777 if (need_split_huge_page(mtr) && npage > 1) {
778 ret = mtr_check_direct_pages(pages, npage, page_shift);
779 if (ret) {
780 ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
781 mtr->umem ? "umtr" : "kmtr", ret, npage);
782 ret = -ENOBUFS;
783 goto err_alloc_list;
784 }
785 }
786
787 ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
788 if (ret)
789 ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
790
791 err_alloc_list:
792 kvfree(pages);
793
794 return ret;
795 }
796
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
798 dma_addr_t *pages, unsigned int page_cnt)
799 {
800 struct ib_device *ibdev = &hr_dev->ib_dev;
801 struct hns_roce_buf_region *r;
802 unsigned int i, mapped_cnt;
803 int ret = 0;
804
	/*
	 * Only the first page address is used as the root BA when hopnum is 0,
	 * because the addresses of all pages are consecutive in that case.
	 */
809 if (mtr->hem_cfg.is_direct) {
810 mtr->hem_cfg.root_ba = pages[0];
811 return 0;
812 }
813
814 for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
815 mapped_cnt < page_cnt; i++) {
816 r = &mtr->hem_cfg.region[i];
817
818 if (r->offset + r->count > page_cnt) {
819 ret = -EINVAL;
820 ibdev_err(ibdev,
821 "failed to check mtr%u count %u + %u > %u.\n",
822 i, r->offset, r->count, page_cnt);
823 return ret;
824 }
825
826 ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
827 page_cnt - mapped_cnt);
828 if (ret < 0) {
829 ibdev_err(ibdev,
830 "failed to map mtr%u offset %u, ret = %d.\n",
831 i, r->offset, ret);
832 return ret;
833 }
834 mapped_cnt += ret;
835 ret = 0;
836 }
837
838 if (mapped_cnt < page_cnt) {
839 ret = -ENOBUFS;
840 ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
841 mapped_cnt, page_cnt);
842 }
843
844 return ret;
845 }
846
static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
848 u32 start_index, u64 *mtt_buf,
849 int mtt_cnt)
850 {
851 int mtt_count;
852 int total = 0;
853 u32 npage;
854 u64 addr;
855
856 if (mtt_cnt > cfg->region_count)
857 return -EINVAL;
858
859 for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
860 mtt_count++) {
861 npage = cfg->region[mtt_count].offset;
862 if (npage < start_index)
863 continue;
864
865 addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
866 mtt_buf[total] = addr;
867
868 total++;
869 }
870
871 if (!total)
872 return -ENOENT;
873
874 return 0;
875 }
876
static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
878 struct hns_roce_mtr *mtr, u32 start_index,
879 u64 *mtt_buf, int mtt_cnt)
880 {
881 int left = mtt_cnt;
882 int total = 0;
883 int mtt_count;
884 __le64 *mtts;
885 u32 npage;
886
887 while (left > 0) {
888 mtt_count = 0;
889 mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
890 start_index + total,
891 &mtt_count);
892 if (!mtts || !mtt_count)
893 break;
894
895 npage = min(mtt_count, left);
896 left -= npage;
897 for (mtt_count = 0; mtt_count < npage; mtt_count++)
898 mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
899 }
900
901 if (!total)
902 return -ENOENT;
903
904 return 0;
905 }
906
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
908 u32 offset, u64 *mtt_buf, int mtt_max)
909 {
910 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
911 u32 start_index;
912 int ret;
913
914 if (!mtt_buf || mtt_max < 1)
915 return -EINVAL;
916
917 /* no mtt memory in direct mode, so just return the buffer address */
918 if (cfg->is_direct) {
919 start_index = offset >> HNS_HW_PAGE_SHIFT;
920 ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
921 mtt_buf, mtt_max);
922 } else {
923 start_index = offset >> cfg->buf_pg_shift;
924 ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
925 mtt_buf, mtt_max);
926 }
927 return ret;
928 }
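
/*
 * In direct mode the addresses returned above are synthesized from the root
 * BA plus each region's page offset; otherwise they are read back from the
 * multi-hop tables that hns_roce_mtr_map() populated.
 */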
929
static int get_best_page_shift(struct hns_roce_dev *hr_dev,
931 struct hns_roce_mtr *mtr,
932 struct hns_roce_buf_attr *buf_attr)
933 {
934 unsigned int page_sz;
935
936 if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
937 return 0;
938
939 page_sz = ib_umem_find_best_pgsz(mtr->umem,
940 hr_dev->caps.page_size_cap,
941 buf_attr->iova);
942 if (!page_sz)
943 return -EINVAL;
944
945 buf_attr->page_shift = order_base_2(page_sz);
946 return 0;
947 }
948
static int get_best_hop_num(struct hns_roce_dev *hr_dev,
950 struct hns_roce_mtr *mtr,
951 struct hns_roce_buf_attr *buf_attr,
952 unsigned int ba_pg_shift)
953 {
954 #define INVALID_HOPNUM -1
955 #define MIN_BA_CNT 1
956 size_t buf_pg_sz = 1 << buf_attr->page_shift;
957 struct ib_device *ibdev = &hr_dev->ib_dev;
958 size_t ba_pg_sz = 1 << ba_pg_shift;
959 int hop_num = INVALID_HOPNUM;
960 size_t unit = MIN_BA_CNT;
961 size_t ba_cnt;
962 int j;
963
964 if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
965 return 0;
966
	/* Calculate the number of buf pages; each buf page needs a BA. */
968 if (mtr->umem)
969 ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
970 else
971 ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
972
973 for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
974 if (ba_cnt <= unit) {
975 hop_num = j;
976 break;
977 }
		/* Number of BAs that can be represented at each hop level */
979 unit *= ba_pg_sz / BA_BYTE_LEN;
980 }
981
982 if (hop_num < 0) {
983 ibdev_err(ibdev,
984 "failed to calculate a valid hopnum.\n");
985 return -EINVAL;
986 }
987
988 buf_attr->region[0].hopnum = hop_num;
989
990 return 0;
991 }
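
/*
 * As a worked example: with 4K BA pages and 8-byte BAs each table holds 512
 * entries, so the loop above picks hop 0 for a single buffer page, hop 1 for
 * up to 512 pages and hop 2 for up to 512 * 512 pages; a 4M MR mapped with
 * 4K buffer pages (1024 BAs) therefore ends up with a hopnum of 2.
 */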
992
static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
994 struct hns_roce_buf_attr *attr)
995 {
996 struct ib_device *ibdev = &hr_dev->ib_dev;
997
998 if (attr->region_count > ARRAY_SIZE(attr->region) ||
999 attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
1000 ibdev_err(ibdev,
1001 "invalid buf attr, region count %d, page shift %u.\n",
1002 attr->region_count, attr->page_shift);
1003 return false;
1004 }
1005
1006 return true;
1007 }
1008
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
1010 struct hns_roce_mtr *mtr,
1011 struct hns_roce_buf_attr *attr)
1012 {
1013 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
1014 struct hns_roce_buf_region *r;
1015 size_t buf_pg_sz;
1016 size_t buf_size;
1017 int page_cnt, i;
1018 u64 pgoff = 0;
1019
1020 if (!is_buf_attr_valid(hr_dev, attr))
1021 return -EINVAL;
1022
1023 /* If mtt is disabled, all pages must be within a continuous range */
1024 cfg->is_direct = !mtr_has_mtt(attr);
1025 cfg->region_count = attr->region_count;
1026 buf_size = mtr_bufs_size(attr);
1027 if (need_split_huge_page(mtr)) {
1028 buf_pg_sz = HNS_HW_PAGE_SIZE;
1029 cfg->buf_pg_count = 1;
1030 /* The ROCEE requires the page size to be 4K * 2 ^ N. */
1031 cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
1032 order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
1033 } else {
1034 buf_pg_sz = 1 << attr->page_shift;
1035 cfg->buf_pg_count = mtr->umem ?
1036 ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
1037 DIV_ROUND_UP(buf_size, buf_pg_sz);
1038 cfg->buf_pg_shift = attr->page_shift;
1039 pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
1040 }
1041
	/* Convert the buffer size to a page index and page count for each
	 * region; the buffer's offset is added to the first region.
	 */
1045 for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
1046 r = &cfg->region[i];
1047 r->offset = page_cnt;
1048 buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
1049 if (attr->type == MTR_PBL && mtr->umem)
1050 r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
1051 else
1052 r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
1053
1054 pgoff = 0;
1055 page_cnt += r->count;
1056 r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
1057 }
1058
1059 return 0;
1060 }
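
/*
 * For instance, with 4K buffer pages a kernel MTR described by three regions
 * of 8K, 4K and 12K becomes offsets 0, 2 and 3 with counts 2, 1 and 3 (six
 * MTT entries in total); a sub-page offset of a user buffer only widens the
 * first region.
 */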
1061
static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
1063 {
1064 return int_pow(ba_per_bt, hopnum - 1);
1065 }
1066
static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
1068 struct hns_roce_mtr *mtr,
1069 unsigned int pg_shift)
1070 {
1071 unsigned long cap = hr_dev->caps.page_size_cap;
1072 struct hns_roce_buf_region *re;
1073 unsigned int pgs_per_l1ba;
1074 unsigned int ba_per_bt;
1075 unsigned int ba_num;
1076 int i;
1077
1078 for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
1079 if (!(BIT(pg_shift) & cap))
1080 continue;
1081
1082 ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
1083 ba_num = 0;
1084 for (i = 0; i < mtr->hem_cfg.region_count; i++) {
1085 re = &mtr->hem_cfg.region[i];
1086 if (re->hopnum == 0)
1087 continue;
1088
1089 pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
1090 ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
1091 }
1092
1093 if (ba_num <= ba_per_bt)
1094 return pg_shift;
1095 }
1096
1097 return 0;
1098 }
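
/*
 * Example: for one region with hopnum 2 covering 100000 buffer pages, a 4K
 * BA page gives 512 BAs per table and 512 pages per level-1 BA, so
 * DIV_ROUND_UP(100000, 512) = 196 level-1 BAs fit in a single root table and
 * the search above stops at the first supported shift (12 if 4K is allowed).
 */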
1099
static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
1101 unsigned int ba_page_shift)
1102 {
1103 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
1104 int ret;
1105
1106 hns_roce_hem_list_init(&mtr->hem_list);
1107 if (!cfg->is_direct) {
1108 ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
1109 if (!ba_page_shift)
1110 return -ERANGE;
1111
1112 ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
1113 cfg->region, cfg->region_count,
1114 ba_page_shift);
1115 if (ret)
1116 return ret;
1117 cfg->root_ba = mtr->hem_list.root_ba;
1118 cfg->ba_pg_shift = ba_page_shift;
1119 } else {
1120 cfg->ba_pg_shift = cfg->buf_pg_shift;
1121 }
1122
1123 return 0;
1124 }
1125
static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
1127 {
1128 hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
1129 }
1130
1131 /**
1132 * hns_roce_mtr_create - Create hns memory translate region.
1133 *
1134 * @hr_dev: RoCE device struct pointer
1135 * @mtr: memory translate region
1136 * @buf_attr: buffer attribute for creating mtr
1137 * @ba_page_shift: page shift for multi-hop base address table
1138 * @udata: user space context, if it's NULL, means kernel space
1139 * @user_addr: userspace virtual address to start at
1140 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
1142 struct hns_roce_buf_attr *buf_attr,
1143 unsigned int ba_page_shift, struct ib_udata *udata,
1144 unsigned long user_addr)
1145 {
1146 struct ib_device *ibdev = &hr_dev->ib_dev;
1147 int ret;
1148
	/* The caller has its own buffer list and invokes hns_roce_mtr_map()
	 * to finish the MTT configuration.
	 */
1152 if (buf_attr->mtt_only) {
1153 mtr->umem = NULL;
1154 mtr->kmem = NULL;
1155 } else {
1156 ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
1157 if (ret) {
1158 ibdev_err(ibdev,
1159 "failed to alloc mtr bufs, ret = %d.\n", ret);
1160 return ret;
1161 }
1162
1163 ret = get_best_page_shift(hr_dev, mtr, buf_attr);
1164 if (ret)
1165 goto err_init_buf;
1166
1167 ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
1168 if (ret)
1169 goto err_init_buf;
1170 }
1171
1172 ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);
1173 if (ret)
1174 goto err_init_buf;
1175
1176 ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
1177 if (ret) {
1178 ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
1179 goto err_init_buf;
1180 }
1181
1182 if (buf_attr->mtt_only)
1183 return 0;
1184
1185 /* Write buffer's dma address to MTT */
1186 ret = mtr_map_bufs(hr_dev, mtr);
1187 if (ret) {
1188 ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
1189 goto err_alloc_mtt;
1190 }
1191
1192 return 0;
1193
1194 err_alloc_mtt:
1195 mtr_free_mtt(hr_dev, mtr);
1196 err_init_buf:
1197 mtr_free_bufs(hr_dev, mtr);
1198
1199 return ret;
1200 }
1201
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
1203 {
1204 /* release multi-hop addressing resource */
1205 hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
1206
1207 /* free buffers */
1208 mtr_free_bufs(hr_dev, mtr);
1209 }
1210