/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

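/*
 * Translate IB access flags into mlx4 MPT permission bits; local read
 * access is always granted.
 */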
static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
               (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) |
               MLX4_PERM_LOCAL_READ;
}

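/* Map an IB memory window type onto the corresponding mlx4 MW type. */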
static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

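/*
 * Register a DMA MR covering the whole address space (iova 0, length
 * ~0ull) with the requested access rights.
 */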
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

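/*
 * Walk the umem in blocks of the MTT page size and write each block's
 * DMA address into consecutive MTT entries.
 */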
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        struct ib_block_iter biter;
        int err, i = 0;
        u64 addr;

        rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) {
                addr = rdma_block_iter_dma_address(&biter);
                err = mlx4_write_mtt(dev->dev, mtt, i++, 1, &addr);
                if (err)
                        return err;
        }
        return 0;
}

static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
                                        u64 length, int access_flags)
{
        /*
         * Force registering the memory as writable if the underlying pages
         * are writable.  This is so rereg can change the access permissions
         * from readable to writable without having to run through
         * ib_umem_get again.
         */
        if (!ib_access_writable(access_flags)) {
                unsigned long untagged_start = untagged_addr(start);
                struct vm_area_struct *vma;

                mmap_read_lock(current->mm);
                /*
                 * FIXME: Ideally this would iterate over all the vmas that
                 * cover the memory, but for now it requires a single vma to
                 * entirely cover the MR to support RO mappings.
                 */
                vma = find_vma(current->mm, untagged_start);
                if (vma && vma->vm_end >= untagged_start + length &&
                    vma->vm_start <= untagged_start) {
                        if (vma->vm_flags & VM_WRITE)
                                access_flags |= IB_ACCESS_LOCAL_WRITE;
                } else {
                        access_flags |= IB_ACCESS_LOCAL_WRITE;
                }

                mmap_read_unlock(current->mm);
        }

        return ib_umem_get(device, start, length, access_flags);
}

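/*
 * Register a user memory region: pin the pages, pick an optimal MTT
 * page size, allocate the MR, populate its MTT and enable it.
 */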
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
        if (shift < 0) {
                err = shift;
                goto err_umem;
        }

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.page_size = 1U << shift;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

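/*
 * Re-register an existing user MR.  Depending on the flags this moves
 * the MR to a new PD, changes its access rights and/or replaces its
 * translation (umem) while the MPT entry is held.
 */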
struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
                                    u64 length, u64 virt_addr,
                                    int mr_access_flags, struct ib_pd *pd,
                                    struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
         * we assume that the calls can't run concurrently. Otherwise, a
         * race exists.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
        if (err)
                return ERR_PTR(err);

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                if (ib_access_writable(mr_access_flags) &&
                    !mmr->umem->writable) {
                        err = -EPERM;
                        goto release_mpt_entry;
                }

                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
                                             mr_access_flags);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
                shift = PAGE_SHIFT;

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova = virt_addr;
                mmr->mmr.size = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR to the HCA, just remember to
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
        if (err)
                return ERR_PTR(err);
        return NULL;
}

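/*
 * Allocate and DMA-map the page list used to build fast-register
 * (FRWR) work requests for this MR.
 */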
static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int ret;

        /* Ensure that size is aligned to DMA cacheline
         * requirements.
         * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
         * so page_map_size will never cross PAGE_SIZE.
         */
        mr->page_map_size = roundup(max_pages * sizeof(u64),
                                    MLX4_MR_PAGES_ALIGN);

        /* Prevent cross page boundary allocation. */
        mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
        if (!mr->pages)
                return -ENOMEM;

        mr->page_map = dma_map_single(device->dev.parent, mr->pages,
                                      mr->page_map_size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dev.parent, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        free_page((unsigned long)mr->pages);
        return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;

                dma_unmap_single(device->dev.parent, mr->page_map,
                                 mr->page_map_size, DMA_TO_DEVICE);
                free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
}

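/* Tear down an MR: free the page list, the HW MR and any pinned umem. */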
int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

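/* Allocate and enable a memory window on the PD associated with @ibmw. */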
int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
        struct mlx4_ib_mw *mw = to_mmw(ibmw);
        int err;

        err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
                            to_mlx4_type(ibmw->type), &mw->mmw);
        if (err)
                return err;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        ibmw->rkey = mw->mmw.key;
        return 0;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);
        return err;
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        return 0;
}

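/*
 * Allocate an MR for fast registration (IB_MR_TYPE_MEM_REG only),
 * limited to MLX4_MAX_FAST_REG_PAGES pages.
 */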
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

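/* ib_sg_to_pages() callback: append one page address to the MR page list. */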
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}

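/*
 * Map a scatterlist into the MR's page list, syncing the DMA-mapped
 * list around the update so the device sees a consistent view.
 */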
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   mr->page_map_size, DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      mr->page_map_size, DMA_TO_DEVICE);

        return rc;
}