1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2
3 /* Authors: Cheng Xu <[email protected]> */
4 /* Kai Shen <[email protected]> */
5 /* Copyright (c) 2020-2022, Alibaba Group. */
6
7 #ifndef __ERDMA_H__
8 #define __ERDMA_H__
9
10 #include <linux/bitfield.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/xarray.h>
14 #include <rdma/ib_verbs.h>
15
16 #include "erdma_hw.h"
17
/* Module name and IB node description reported to the RDMA core. */
#define DRV_MODULE_NAME "erdma"
#define ERDMA_NODE_DESC "Elastic RDMA Adapter stack"
20
/*
 * Event queue (EQ) ring shared with the device. Used for the AEQ, the
 * CEQs and the cmdq's internal EQ.
 */
struct erdma_eq {
	void *qbuf;			/* CPU address of the queue buffer */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf */

	spinlock_t lock;

	u32 depth;			/* number of entries in the ring */

	u16 ci;				/* consumer index (driver side) */
	u16 rsvd;			/* explicit padding */

	atomic64_t event_num;		/* events handled (statistics) */
	atomic64_t notify_num;		/* notifications armed (statistics) */

	void __iomem *db;		/* doorbell register in BAR space */
	u64 *dbrec;			/* doorbell record, CPU address */
	dma_addr_t dbrec_dma;		/* doorbell record, DMA address */
};
39
/* Command queue send queue: ring the driver posts command WQEs into. */
struct erdma_cmdq_sq {
	void *qbuf;			/* CPU address of the queue buffer */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf */

	spinlock_t lock;

	u32 depth;			/* number of WQEBBs in the ring */
	u16 ci;				/* consumer index */
	u16 pi;				/* producer index */

	u16 wqebb_cnt;			/* WQEBBs consumed by current request */

	u64 *dbrec;			/* doorbell record, CPU address */
	dma_addr_t dbrec_dma;		/* doorbell record, DMA address */
};
55
/* Command queue completion queue: ring the device posts command CQEs into. */
struct erdma_cmdq_cq {
	void *qbuf;			/* CPU address of the queue buffer */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf */

	spinlock_t lock;

	u32 depth;			/* number of CQEs in the ring */
	u32 ci;				/* consumer index */
	u32 cmdsn;			/* command sequence number */

	u64 *dbrec;			/* doorbell record, CPU address */
	dma_addr_t dbrec_dma;		/* doorbell record, DMA address */

	atomic64_t armed_num;		/* times the CQ was armed (statistics) */
};
71
/* Lifecycle states of a posted command (erdma_comp_wait::cmd_status). */
enum {
	ERDMA_CMD_STATUS_INIT,		/* slot allocated, not yet posted */
	ERDMA_CMD_STATUS_ISSUED,	/* posted to the SQ, awaiting CQE */
	ERDMA_CMD_STATUS_FINISHED,	/* completion received */
	ERDMA_CMD_STATUS_TIMEOUT	/* no completion within the timeout */
};
78
/* Per-command wait slot used by the synchronous command interface. */
struct erdma_comp_wait {
	struct completion wait_event;	/* signalled when the CQE arrives */
	u32 cmd_status;			/* ERDMA_CMD_STATUS_* state */
	u32 ctx_id;			/* slot id, echoed back by the device */
	u16 sq_pi;			/* SQ producer index of this command */
	u8 comp_status;			/* completion status from the CQE */
	u8 rsvd;			/* explicit padding */
	u32 comp_data[4];		/* response payload from the CQE */
};
88
/* Bit positions for erdma_cmdq::state. */
enum {
	ERDMA_CMDQ_STATE_OK_BIT = 0,	/* cmdq is operational */
	ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1, /* a command has timed out */
	ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2, /* device returned a bad ctx_id */
};

#define ERDMA_CMDQ_TIMEOUT_MS 15000	/* per-command completion timeout */
#define ERDMA_REG_ACCESS_WAIT_MS 20	/* delay between device reg polls */
#define ERDMA_WAIT_DEV_DONE_CNT 500	/* max polls waiting for the device */
98
/*
 * Driver/device command channel: an SQ to post commands, a CQ for their
 * completions, and an EQ to signal CQ activity. Concurrency is bounded
 * by the 'credits' semaphore.
 */
struct erdma_cmdq {
	unsigned long *comp_wait_bitmap;	/* allocation map for wait_pool */
	struct erdma_comp_wait *wait_pool;	/* one slot per in-flight command */
	spinlock_t lock;

	struct erdma_cmdq_sq sq;
	struct erdma_cmdq_cq cq;
	struct erdma_eq eq;

	unsigned long state;			/* ERDMA_CMDQ_STATE_* bits */

	struct semaphore credits;		/* limits outstanding commands */
	u16 max_outstandings;
};
113
/* Congestion control algorithm to fall back to when peers disagree. */
#define COMPROMISE_CC ERDMA_CC_CUBIC
/* Congestion control algorithms selectable on the device. */
enum erdma_cc_alg {
	ERDMA_CC_NEWRENO = 0,
	ERDMA_CC_CUBIC,
	ERDMA_CC_HPCC_RTT,
	ERDMA_CC_HPCC_ECN,
	ERDMA_CC_HPCC_INT,
	ERDMA_CC_METHODS_NUM	/* number of algorithms, keep last */
};
123
/* Device attributes and capability limits queried from firmware. */
struct erdma_devattr {
	u32 fw_version;

	unsigned char peer_addr[ETH_ALEN];	/* MAC address of the device */
	unsigned long cap_flags;		/* device capability bits */

	int numa_node;
	enum erdma_cc_alg cc;			/* active congestion control */
	u32 irq_num;

	/* Verbs resource limits reported to the RDMA core. */
	u32 max_qp;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_ord;			/* max outbound RDMA-read depth */
	u32 max_ird;			/* max inbound RDMA-read depth */

	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u64 max_mr_size;
	u32 max_mr;
	u32 max_pd;
	u32 max_mw;
	u32 max_gid;
	u32 max_ah;
	u32 local_dma_key;
};
153
#define ERDMA_IRQNAME_SIZE 50

/* One MSI-X interrupt: its name, vector and preferred CPU affinity. */
struct erdma_irq {
	char name[ERDMA_IRQNAME_SIZE];
	u32 msix_vector;
	cpumask_t affinity_hint_mask;
};
161
/* Completion EQ with its interrupt and bottom-half handler. */
struct erdma_eq_cb {
	bool ready;		/* EQ initialized and safe to service */
	void *dev;		/* back-pointer; all EQs use this field to get the erdma_dev */
	struct erdma_irq irq;
	struct erdma_eq eq;
	struct tasklet_struct tasklet;	/* bottom half for EQ processing */
};
169
/* Bitmap-based index allocator for a class of device resources. */
struct erdma_resource_cb {
	unsigned long *bitmap;	/* one bit per allocatable index */
	spinlock_t lock;
	u32 next_alloc_idx;	/* round-robin starting point for searches */
	u32 max_cap;		/* total number of indices */
};
176
/* Indices into erdma_dev::res_cb, one allocator per resource class. */
enum {
	ERDMA_RES_TYPE_PD = 0,		/* protection domains */
	ERDMA_RES_TYPE_STAG_IDX = 1,	/* memory region STag indices */
	ERDMA_RES_TYPE_AH = 2,		/* address handles */
	ERDMA_RES_CNT = 3,		/* number of classes, keep last */
};
183
/* Per-device state; embeds the ib_device registered with the RDMA core. */
struct erdma_dev {
	struct ib_device ibdev;
	struct net_device *netdev;	/* companion Ethernet device */
	struct pci_dev *pdev;
	struct notifier_block netdev_nb; /* tracks netdev state changes */
	struct workqueue_struct *reflush_wq;

	/* PCI BAR of this function, mapped at func_bar. */
	resource_size_t func_bar_addr;
	resource_size_t func_bar_len;
	u8 __iomem *func_bar;

	struct erdma_devattr attrs;	/* firmware-reported capabilities */
	u32 mtu;

	/* cmdq and aeq use the same msix vector */
	struct erdma_irq comm_irq;
	struct erdma_cmdq cmdq;
	struct erdma_eq aeq;
	struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];

	spinlock_t lock;
	struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
	struct xarray qp_xa;		/* QPN -> erdma QP lookup */
	struct xarray cq_xa;		/* CQN -> erdma CQ lookup */

	u32 next_alloc_qpn;		/* round-robin QPN allocation hint */
	u32 next_alloc_cqn;		/* round-robin CQN allocation hint */

	atomic_t num_ctx;		/* open user contexts */
	struct list_head cep_list;	/* connection endpoints (iWARP CM) */

	struct dma_pool *db_pool;	/* pool for doorbell records */
	struct dma_pool *resp_pool;	/* pool for command response buffers */
	enum erdma_proto_type proto;
};
219
/*
 * Return a pointer to entry @idx of a queue ring.
 *
 * @qbuf:  base CPU address of the queue buffer.
 * @idx:   logical entry index; may exceed @depth, it is wrapped by masking.
 * @depth: number of entries; must be a power of two for the mask to work.
 * @shift: log2 of the entry size in bytes.
 */
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
{
	u32 wrapped = idx & (depth - 1);

	return qbuf + (wrapped << shift);
}
226
to_edev(struct ib_device * ibdev)227 static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
228 {
229 return container_of(ibdev, struct erdma_dev, ibdev);
230 }
231
/* Read a 32-bit device register at offset @reg in the function BAR. */
static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
	u8 __iomem *addr = dev->func_bar + reg;

	return readl(addr);
}
236
erdma_reg_read64(struct erdma_dev * dev,u32 reg)237 static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
238 {
239 return readq(dev->func_bar + reg);
240 }
241
/* Write @value to the 32-bit device register at offset @reg. */
static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
	u8 __iomem *addr = dev->func_bar + reg;

	writel(value, addr);
}
246
/* Write @value to the 64-bit device register at offset @reg. */
static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
	u8 __iomem *addr = dev->func_bar + reg;

	writeq(value, addr);
}
251
/*
 * Read a 32-bit register and extract the field selected by @filed_mask.
 * NOTE: "filed" is a historical typo for "field"; the name is kept
 * unchanged so existing callers keep compiling.
 */
static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
					 u32 filed_mask)
{
	u32 regval = erdma_reg_read32(dev, reg);

	return FIELD_GET(filed_mask, regval);
}
259
/* Extract a named command field: ERDMA_GET(v, FOO) uses ERDMA_CMD_FOO_MASK. */
#define ERDMA_GET(val, name) FIELD_GET(ERDMA_CMD_##name##_MASK, val)

/* Command queue lifecycle and synchronous command submission. */
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1, bool sleepable);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

/* Completion event queues (CEQs) and generic EQ helpers. */
int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);

/* Asynchronous event queue (AEQ) setup and common EQ init/teardown. */
int erdma_aeq_init(struct erdma_dev *dev);
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);

/* Interrupt-time event dispatchers. */
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
282
283 #endif
284