// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
#include "mlx5_ib.h"
#include "restrack.h"

/* Upper bound on the raw resource dump copied into one netlink attribute. */
#define MAX_DUMP_SIZE 1024

/*
 * Retrieve the firmware resource dump for a single object of the given
 * segment type and copy it into @data. On success, @data_len is set to
 * the number of bytes written.
 */
static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
		    int index, void *data, int *data_len)
{
	struct mlx5_core_dev *mdev = dev;
	struct mlx5_rsc_dump_cmd *cmd;
	struct mlx5_rsc_key key = {};
	struct page *page;
	int offset = 0;
	int err = 0;
	int cmd_err;
	int size;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	key.size = PAGE_SIZE;
	key.rsc = type;
	key.index1 = index;
	key.num_of_obj1 = 1;

	cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	/*
	 * mlx5_rsc_dump_next() returns a positive value while more dump
	 * data is pending, zero on completion and a negative errno on
	 * failure. Accumulate the pages into @data, bailing out if the
	 * dump would overflow MAX_DUMP_SIZE.
	 */
	do {
		cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
			err = cmd_err;
			goto destroy_cmd;
		}
		memcpy(data + offset, page_address(page), size);
		offset += size;
	} while (cmd_err > 0);
	*data_len = offset;

destroy_cmd:
	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err;
}

/* Dump one object and emit it as a RDMA_NLDEV_ATTR_RES_RAW attribute. */
static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
			enum mlx5_sgmt_type type, u32 key)
{
	int len = 0;
	void *data;
	int err;

	data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = dump_rsc(dev->mdev, type, key, data, &len);
	if (err)
		goto out;

	err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
out:
	kfree(data);
	return err;
}

/* Export the per-MR ODP pagefault statistics as hwcounter entries. */
static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr)
		goto err;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
					 atomic64_read(&mr->odp_stats.faults)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_invalidations",
		    atomic64_read(&mr->odp_stats.invalidations)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
					 atomic64_read(&mr->odp_stats.prefetch)))
		goto err_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
			    mlx5_mkey_to_idx(mr->mmkey.key));
}

/* Annotate ODP MRs with whether they are implicit or explicit. */
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	if (mr->is_odp_implicit) {
		if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
			goto err;
	} else {
		if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
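
/*
 * The MR callbacks above surface through iproute2's rdma tool. A
 * representative (not verbatim) session for an ODP-enabled MR might
 * look like:
 *
 *   $ rdma stat show mr
 *   dev mlx5_0 mrn 5 page_faults 2357 page_invalidations 11 page_prefetch 3
 *
 *   $ rdma res show mr -dd
 *   dev mlx5_0 mrn 5 ... drv_odp explicit
 */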

static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}

/*
 * Generic restrack reports driver QPs only as IB_QPT_DRIVER; name the
 * mlx5-specific subtype (REG_UMR, DCT or DCI) so that it is visible to
 * userspace.
 */
static int fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int ret;

	if (qp->type < IB_QPT_DRIVER)
		return 0;

	switch (qp->type) {
	case MLX5_IB_QPT_REG_UMR:
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE,
				     "REG_UMR");
		break;
	case MLX5_IB_QPT_DCT:
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCT");
		break;
	case MLX5_IB_QPT_DCI:
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCI");
		break;
	default:
		return 0;
	}
	if (ret)
		return ret;

	return nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, IB_QPT_DRIVER);
}

static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
			    ibqp->qp_num);
}

static const struct ib_device_ops restrack_ops = {
	.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
	.fill_res_mr_entry = fill_res_mr_entry,
	.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
	.fill_res_qp_entry = fill_res_qp_entry,
	.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
	.fill_stat_mr_entry = fill_stat_mr_entry,
};

int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &restrack_ops);
	return 0;
}
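
/*
 * The raw fill callbacks emit the firmware's PRM query output as an
 * opaque RDMA_NLDEV_ATTR_RES_RAW blob, capped at MAX_DUMP_SIZE bytes per
 * object. A representative (not verbatim) way to retrieve it with
 * iproute2's rdma tool:
 *
 *   $ rdma res show qp -r -dd
 *   dev mlx5_0 lqpn 1 data 00000000 00000000 ...
 */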