--- odp.c (627122280c878cf5d3cda2d2c5a0a8f6a7e35cb7)
+++ odp.c (a419bfb7632095410adc3aecb1e863568f049add)
 /*
  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
  * OpenIB.org BSD license below:

--- 403 unchanged lines hidden ---

         int err;

         odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
                                       idx * MLX5_IMR_MTT_SIZE,
                                       MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
         if (IS_ERR(odp))
                 return ERR_CAST(odp);

-        mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
-                                 MLX5_MKC_ACCESS_MODE_MTT,
-                                 MLX5_IMR_MTT_ENTRIES);
+        mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
+                                 imr->access_flags);
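
Both sides allocate the child MR from the mkey cache, but with different
contracts; the same signature change recurs at the KSM call site further down.
A sketch of the two prototypes as they can be inferred from these call sites
alone (return and parameter types are assumptions, not taken from this diff):

        /* 627122280c side: the cache is asked for an mkey by its
         * properties; access mode and descriptor count travel with the
         * request.
         */
        struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
                                               int access_flags,
                                               int access_mode, int ndescs);

        /* a419bfb763 side: the caller picks a pre-sized entry out of a
         * fixed array (dev->cache.ent[...]) and passes it explicitly.
         */
        struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
                                               struct mlx5_cache_ent *ent,
                                               int access_flags);
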
         if (IS_ERR(mr)) {
                 ib_umem_odp_release(odp);
                 return mr;
         }

         mr->access_flags = imr->access_flags;
         mr->ibmr.pd = imr->ibmr.pd;
         mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;

--- 57 unchanged lines hidden ---

         if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
                 return ERR_PTR(-EOPNOTSUPP);

         umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
         if (IS_ERR(umem_odp))
                 return ERR_CAST(umem_odp);

-        imr = mlx5_mr_cache_alloc(dev, access_flags, MLX5_MKC_ACCESS_MODE_KSM,
-                                  mlx5_imr_ksm_entries);
+        imr = mlx5_mr_cache_alloc(dev,
+                                  &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
+                                  access_flags);
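
The implicit-MR setup repeats the pattern noted above: on the 627122280c side
the caller spells out MLX5_MKC_ACCESS_MODE_KSM and mlx5_imr_ksm_entries, while
on the a419bfb763 side it selects the reserved MLX5_IMR_KSM_CACHE_ENTRY slot
and the entry itself carries those properties.
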
         if (IS_ERR(imr)) {
                 ib_umem_odp_release(umem_odp);
                 return imr;
         }

         imr->access_flags = access_flags;
         imr->ibmr.pd = &pd->ibpd;
         imr->ibmr.iova = 0;

--- 475 unchanged lines hidden ---

 static int pagefault_data_segments(struct mlx5_ib_dev *dev,
                                    struct mlx5_pagefault *pfault,
                                    void *wqe,
                                    void *wqe_end, u32 *bytes_mapped,
                                    u32 *total_wqe_bytes, bool receive_queue)
 {
         int ret = 0, npages = 0;
         u64 io_virt;
-        u32 key;
+        __be32 key;
         u32 byte_count;
         size_t bcnt;
         int inline_segment;

         if (bytes_mapped)
                 *bytes_mapped = 0;
         if (total_wqe_bytes)
                 *total_wqe_bytes = 0;

         while (wqe < wqe_end) {
                 struct mlx5_wqe_data_seg *dseg = wqe;

                 io_virt = be64_to_cpu(dseg->addr);
-                key = be32_to_cpu(dseg->lkey);
+                key = dseg->lkey;
                 byte_count = be32_to_cpu(dseg->byte_count);
                 inline_segment = !!(byte_count & MLX5_INLINE_SEG);
                 bcnt = byte_count & ~MLX5_INLINE_SEG;

                 if (inline_segment) {
                         bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
                         wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
                                      16);
                 } else {
                         wqe += sizeof(*dseg);
                 }

                 /* receive WQE end of sg list. */
-                if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
-                    io_virt == 0)
+                if (receive_queue && bcnt == 0 &&
+                    key == MLX5_TERMINATE_SCATTER_LIST_LKEY && io_virt == 0)
                         break;

                 if (!inline_segment && total_wqe_bytes) {
                         *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
                                         pfault->bytes_committed);
                 }

                 /* A zero length data segment designates a length of 2GB. */
                 if (bcnt == 0)
                         bcnt = 1U << 31;

                 if (inline_segment || bcnt <= pfault->bytes_committed) {
                         pfault->bytes_committed -=
                                 min_t(size_t, bcnt,
                                       pfault->bytes_committed);
                         continue;
                 }

-                ret = pagefault_single_data_segment(dev, NULL, key,
+                ret = pagefault_single_data_segment(dev, NULL, be32_to_cpu(key),
                                                     io_virt, bcnt,
                                                     &pfault->bytes_committed,
                                                     bytes_mapped);
                 if (ret < 0)
                         break;
                 npages += ret;
         }

--- 536 unchanged lines hidden ---

         err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
         cancel_work_sync(&eq->work);
         destroy_workqueue(eq->wq);
         mempool_destroy(eq->pool);

         return err;
 }

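The hunks in pagefault_data_segments() above amount to one logical change in
how the scatter-gather key's endianness is handled. On the 627122280c side the
lkey is byte-swapped to host order as soon as it is read and compared against
the host-order MLX5_INVALID_LKEY; on the a419bfb763 side it stays in wire
(big-endian) order for the whole loop, the end-of-list test compares it
against MLX5_TERMINATE_SCATTER_LIST_LKEY (which must itself be a big-endian
constant for that comparison to be correct), and the one consumer that needs
host order gets be32_to_cpu(key) at the call site. A minimal userspace sketch
of the same pattern, with htonl()/ntohl() standing in for
cpu_to_be32()/be32_to_cpu() and an illustrative sentinel value not taken from
this diff:

        #include <stdint.h>
        #include <stdio.h>
        #include <arpa/inet.h>

        /* Illustrative sentinel kept in wire (big-endian) order so it can
         * be compared against an unconverted field.
         */
        #define TERMINATE_LKEY htonl(0x100)

        struct data_seg {               /* stand-in for mlx5_wqe_data_seg */
                uint32_t byte_count;    /* big-endian on the wire */
                uint32_t lkey;          /* big-endian on the wire */
        };

        static void walk(const struct data_seg *seg, size_t n)
        {
                for (size_t i = 0; i < n; i++) {
                        uint32_t key = seg[i].lkey; /* stays big-endian */

                        /* No swap needed: both sides of the comparison are
                         * big-endian, as in the MLX5_TERMINATE_... test.
                         */
                        if (key == TERMINATE_LKEY) {
                                puts("end of scatter list");
                                break;
                        }

                        /* Convert exactly once, at the consumer that wants
                         * host order, as the be32_to_cpu(key) call site does.
                         */
                        printf("segment %zu: lkey 0x%x\n", i, ntohl(key));
                }
        }

        int main(void)
        {
                struct data_seg segs[] = {
                        { htonl(64), htonl(0xabcd) },
                        { htonl(0), TERMINATE_LKEY },
                };

                walk(segs, 2);
                return 0;
        }

A side effect of keeping the field typed __be32 is that tools like sparse can
flag any use that forgets the conversion.
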
-int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
 {
-        struct mlx5r_cache_rb_key rb_key = {
-                .access_mode = MLX5_MKC_ACCESS_MODE_KSM,
-                .ndescs = mlx5_imr_ksm_entries,
-        };
-        struct mlx5_cache_ent *ent;
-
-        if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
-                return 0;
-
-        ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
-        if (IS_ERR(ent))
-                return PTR_ERR(ent);
-
-        return 0;
+        if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+                return;
+
+        switch (ent->order - 2) {
+        case MLX5_IMR_MTT_CACHE_ENTRY:
+                ent->page = PAGE_SHIFT;
+                ent->ndescs = MLX5_IMR_MTT_ENTRIES;
+                ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+                ent->limit = 0;
+                break;
+
+        case MLX5_IMR_KSM_CACHE_ENTRY:
+                ent->page = MLX5_KSM_PAGE_SHIFT;
+                ent->ndescs = mlx5_imr_ksm_entries;
+                ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
+                ent->limit = 0;
+                break;
+        }
 }

 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
         .advise_mr = mlx5_ib_advise_mr,
 };

 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 {

--- 203 unchanged lines hidden ---
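
The last hunk shows the structural difference behind the allocation changes
above. On the a419bfb763 side, mlx5_odp_init_mkey_cache_entry() configures
reserved slots of a fixed entry array at init time (page shift, descriptor
count, access mode, and a zero limit so the entries are not pre-populated); on
the 627122280c side, mlx5_odp_init_mkey_cache() describes the implicit-MR
entry by a key and asks the cache to create it, so no reserved array index is
needed. A rough sketch of the two shapes, keeping only the fields visible in
this diff (all types, and the array/tree internals, are assumptions):

        /* a419bfb763 side: a fixed array indexed by reserved constants
         * such as MLX5_IMR_MTT_CACHE_ENTRY and MLX5_IMR_KSM_CACHE_ENTRY.
         */
        struct mlx5_cache_ent {
                unsigned int page;      /* log2 page size for the mkeys */
                unsigned int ndescs;    /* descriptors per cached mkey */
                int access_mode;        /* MLX5_MKC_ACCESS_MODE_MTT/_KSM */
                unsigned int limit;     /* 0: do not pre-populate */
                unsigned int order;
                /* ... */
        };

        /* 627122280c side: entries are looked up (and created on demand)
         * by a descriptive key, e.g. via mlx5r_cache_create_ent_locked().
         */
        struct mlx5r_cache_rb_key {
                int access_mode;
                unsigned int ndescs;
                /* ... */
        };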