// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
#include "mlx5_ib.h"
#include "restrack.h"

#define MAX_DUMP_SIZE 1024

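/*
 * Query the firmware context of a single resource, identified by segment
 * type and object index, through the mlx5 resource dump interface. The
 * dump is copied page by page into @data, up to MAX_DUMP_SIZE bytes, and
 * the number of bytes written is returned in @data_len.
 */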
static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
		    int index, void *data, int *data_len)
{
	struct mlx5_core_dev *mdev = dev;
	struct mlx5_rsc_dump_cmd *cmd;
	struct mlx5_rsc_key key = {};
	struct page *page;
	int offset = 0;
	int err = 0;
	int cmd_err;
	int size;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	key.size = PAGE_SIZE;
	key.rsc = type;
	key.index1 = index;
	key.num_of_obj1 = 1;

	cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
			err = cmd_err;
			goto destroy_cmd;
		}
		memcpy(data + offset, page_address(page), size);
		offset += size;
	} while (cmd_err > 0);
	*data_len = offset;

destroy_cmd:
	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err;
}

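/*
 * Dump the firmware object identified by @type/@key and attach the raw
 * bytes to the netlink message as an RDMA_NLDEV_ATTR_RES_RAW attribute.
 */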
static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
			enum mlx5_sgmt_type type, u32 key)
{
	int len = 0;
	void *data;
	int err;

	data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = dump_rsc(dev->mdev, type, key, data, &len);
	if (err)
		goto out;

	err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
out:
	kfree(data);
	return err;
}

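/*
 * Expose per-MR ODP statistics (page fault, invalidation and prefetch
 * counters) as a nested RDMA_NLDEV_ATTR_STAT_HWCOUNTERS table. MRs that
 * were not registered with IB_ACCESS_ON_DEMAND are skipped.
 */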
static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg,
				    RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);

	if (!table_attr)
		goto err;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
					 atomic64_read(&mr->odp_stats.faults)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_faults_handled",
		    atomic64_read(&mr->odp_stats.faults_handled)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_invalidations",
		    atomic64_read(&mr->odp_stats.invalidations)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_invalidations_handled",
		    atomic64_read(&mr->odp_stats.invalidations_handled)))
		goto err_table;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
					 atomic64_read(&mr->odp_stats.prefetch)))
		goto err_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

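/*
 * Raw dump of the MR: query the mkey context backing this MR.
 */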
static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
			    mlx5_mkey_to_idx(mr->mmkey.key));
}

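/*
 * Driver-specific MR attributes: report whether an ODP MR is implicit
 * or explicit under the RDMA_NLDEV_ATTR_DRIVER nest.
 */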
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	if (mr->is_odp_implicit) {
		if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
			goto err;
	} else {
		if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

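/*
 * Raw dump of the CQ: query the CQ context by its CQN.
 */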
static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}

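/*
 * For driver-specific QP types (REG_UMR, DCT, DCI), report the subtype
 * name and mark the resource type as IB_QPT_DRIVER.
 */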
static int fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	int ret;

	if (qp->type < IB_QPT_DRIVER)
		return 0;

	switch (qp->type) {
	case MLX5_IB_QPT_REG_UMR:
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE,
				     "REG_UMR");
		break;
	case MLX5_IB_QPT_DCT:
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCT");
		break;
	case MLX5_IB_QPT_DCI:
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCI");
		break;
	default:
		return 0;
	}
	if (ret)
		return ret;

	return nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, IB_QPT_DRIVER);
}

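/*
 * Raw dump of the QP: query the QP context by its QP number.
 */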
static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
			    ibqp->qp_num);
}

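/* Restrack callbacks invoked by the rdma netlink resource tracking core. */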
static const struct ib_device_ops restrack_ops = {
	.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
	.fill_res_mr_entry = fill_res_mr_entry,
	.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
	.fill_res_qp_entry = fill_res_qp_entry,
	.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
	.fill_stat_mr_entry = fill_stat_mr_entry,
};

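/*
 * Register the restrack fill callbacks on the IB device.
 */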
int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &restrack_ops);
	return 0;
}