/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))
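
/*
 * Illustrative example: NUM_CHUNKS() is a ceiling division, e.g.
 * NUM_CHUNKS(0x2001, 0x1000) == 3 -- a buffer spilling one byte into a
 * third 4K chunk still occupies three chunks.
 */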

/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

/* DMEM toleration management */
#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
#define EHCA_HUGEPAGESHIFT     34
#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
#define EHCA_DIR_MAP_SIZE (0x10000)
#define EHCA_ENT_MAP_SIZE (0x10000)
#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
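
/*
 * Worked example for the constants above (assuming SECTION_SIZE_BITS == 24,
 * i.e. 16MB memory sections on POWER): a section number is split into three
 * 13-bit indices -- idx = bits 0..12, dir = bits 13..25, top = bits 26..38.
 * Each level then holds EHCA_MAP_ENTRIES == 8192 eight-byte slots, which is
 * exactly one 64K block, matching EHCA_TOP/DIR/ENT_MAP_SIZE.
 */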

static unsigned long ehca_mr_len;

/*
 * Memory map data structures
 */
struct ehca_dir_bmap {
	u64 ent[EHCA_MAP_ENTRIES];
};
struct ehca_top_bmap {
	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
};
struct ehca_bmap {
	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
};

static struct ehca_bmap *ehca_bmap;

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
	EHCA_MR_PGSIZE4K  = 0x1000L,
	EHCA_MR_PGSIZE64K = 0x10000L,
	EHCA_MR_PGSIZE1M  = 0x100000L,
	EHCA_MR_PGSIZE16M = 0x1000000L
};

#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24

static u64 ehca_map_vaddr(void *caddr);

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	int log = ilog2(pgsize);

	WARN_ON(log < 12 || log > 24 || log & 3);
	return (log - 12) / 4;
}
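
/*
 * Illustrative values for the encoding above: 4K (log2 == 12) -> 0,
 * 64K (16) -> 1, 1M (20) -> 2, 16M (24) -> 3.
 */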

static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
	return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
}

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr,
				     (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret, page_shift;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hwpage_size;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%llx "
			 "virt_base=%llx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags, 0);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	/* select proper hw_pgsize */
	page_shift = PAGE_SHIFT;
	if (e_mr->umem->hugetlb) {
		/* determine page_shift, clamp between 4K and 16M */
		page_shift = (fls64(length - 1) + 3) & ~3;
		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
				 EHCA_MR_PGSHIFT16M);
	}
	hwpage_size = 1UL << page_shift;

	/* now that we have the desired page size, shift until it's
	 * supported, too. 4K is always supported, so this terminates.
	 */
	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
		hwpage_size >>= 4;
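
	/*
	 * Worked example (illustrative): a 5MB hugetlb region yields
	 * fls64(length - 1) == 23, rounded up to the next multiple of 4,
	 * so page_shift == 24 and hwpage_size == 16M; if the HCA caps out
	 * at 1M pages, the loop above shifts 16M down to 1M.
	 */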

reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.hwpage_size = hwpage_size;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
	pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
		ehca_warn(pd->device, "failed to register mr "
			  "with hwpage_size=%llx", hwpage_size);
		ehca_info(pd->device, "try to register mr with "
			  "kpage_size=%lx", PAGE_SIZE);
		/*
		 * this means kpages are not contiguous for a hw page
		 * try kernel page size as fallback solution
		 */
		hwpage_size = PAGE_SIZE;
		goto reg_user_mr_fallback;
	}
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
			 "shca=%p hca_hndl=%llx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	u64 hw_pgsize;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	hw_pgsize = 1 << fmr_attr->page_shift;
	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.hwpage_size = hw_pgsize;
	/*
	 * pginfo.num_hwpages==0, ie register_rpages() will not be called
	 * but deferred to map_phys_fmr()
	 */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->hwpage_size = hw_pgsize;
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.hwpage_size = e_fmr->hwpage_size;
	pginfo.num_hwpages =
		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage =
		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
			 "iova=%llx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo);

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey, /*OUT*/
		enum ehca_reg_type reg_type)
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
	if (ehca_use_hp_mr == 1)	/* high performance MR */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	if (reg_type == EHCA_REG_BUSMAP_MR)
		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
	else if (reg_type == EHCA_REG_MR)
		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	else
		ret = -EINVAL;

	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->hwpage_size = pginfo->hwpage_size;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	if (!pginfo->num_hwpages) /* in case of fmr */
		return 0;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max MAX_RPAGES ehca mr pages per register call */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;      /* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%i rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = __pa(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(
			shca->ipz_hca_handle, e_mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lli "
					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
				 "mr_hndl=%llx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	}

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */
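
/*
 * Note on the rpage protocol implemented above: each
 * hipz_h_register_rpage_mr() hcall takes at most MAX_RPAGES (512) page
 * addresses; every intermediate call is expected to return
 * H_PAGE_REGISTERED and only the final one H_SUCCESS, hence the special
 * handling of the last loop iteration.
 */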

/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = __pa(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->hwpage_size = pginfo->hwpage_size;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
		ret = 0;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
		if (ret) {
			/* restore everything from ->flags onward */
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		/* restore everything from ->flags onward from the saved copy */
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		memcpy(&e_fmr->flags, &(save_fmr.flags),
		       sizeof(struct ehca_mr) - offset);
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

static inline void *ehca_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;

	ret |= dir << EHCA_DIR_INDEX_SHIFT;
	ret |= top << EHCA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}

#define ehca_bmap_valid(entry) \
	((u64)entry != (u64)EHCA_INVAL_ADDR)
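
/*
 * The busmap levels below are initialized with 0xFF bytes (see the
 * memset() calls in ehca_init_bmap() and ehca_update_busmap()), so an
 * untouched slot reads back as EHCA_INVAL_ADDR; ehca_bmap_valid() thus
 * works both on the pointer levels and on the u64 entries.
 */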

static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
			       struct ehca_shca *shca, struct ehca_mr *mr,
			       struct ehca_mr_pginfo *pginfo)
{
	u64 h_ret = H_SUCCESS;
	unsigned long page = 0;
	u64 rpage = __pa(kpage);
	int page_count;

	void *sectbase = ehca_calc_sectbase(top, dir, idx);
	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
		ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
					   " hwpage_size does not fit to "
					   "section start address");
	}
	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;

	while (page < page_count) {
		u64 rnum;
		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
		     rnum++) {
			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
			kpage[rnum] = __pa(pg);
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
			ehca_err(&shca->ib_device, "register_rpage_mr failed");
			return h_ret;
		}
	}
	return h_ret;
}

static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
				struct ehca_shca *shca, struct ehca_mr *mr,
				struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
			continue;

		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
					   pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
				    struct ehca_mr *mr,
				    struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
			continue;

		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hw_pgsize;

	if (!ehca_bmap) {
		ret = -EFAULT;
		goto ehca_reg_internal_maxmr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = ehca_mr_len;
	iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
				 hw_pgsize);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.hwpage_size = hw_pgsize;
	pginfo.u.phy.addr = 0;
	pginfo.u.phy.size = size_maxmr;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	u64 pgaddr;
	u32 j = 0;
	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
	struct scatterlist **sg = &pginfo->u.usr.next_sg;

	while (*sg != NULL) {
		pgaddr = page_to_pfn(sg_page(*sg))
			<< PAGE_SHIFT;
		*kpage = pgaddr + (pginfo->next_hwpage *
				   pginfo->hwpage_size);
		if (!(*kpage)) {
			ehca_gen_err("pgaddr=%llx "
				     "sg_dma_address=%llx "
				     "entry=%llx next_hwpage=%llx",
				     pgaddr, (u64)sg_dma_address(*sg),
				     pginfo->u.usr.next_nmap,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		(pginfo->next_hwpage)++;
		kpage++;
		if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
			(pginfo->kpage_cnt)++;
			(pginfo->u.usr.next_nmap)++;
			pginfo->next_hwpage = 0;
			*sg = sg_next(*sg);
		}
		j++;
		if (j >= number)
			break;
	}

	return ret;
}

/*
 * check given pages for contiguous layout
 * last page addr is returned in prev_pgaddr for further check
 */
static int ehca_check_kpages_per_ate(struct scatterlist **sg,
				     int num_pages,
				     u64 *prev_pgaddr)
{
	for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
		u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
		if (ehca_debug_level >= 3)
			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
				     *(u64 *)__va(pgaddr));
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
			ehca_gen_err("uncontiguous page found pgaddr=%llx "
				     "prev_pgaddr=%llx entries_left_in_hwpage=%x",
				     pgaddr, *prev_pgaddr, num_pages);
			return -EINVAL;
		}
		*prev_pgaddr = pgaddr;
	}
	return 0;
}

/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	u64 pgaddr, prev_pgaddr;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;
	struct scatterlist **sg = &pginfo->u.usr.next_sg;

	while (*sg != NULL) {

		if (nr_kpages == kpages_per_hwpage) {
			pgaddr = (page_to_pfn(sg_page(*sg))
				   << PAGE_SHIFT);
			*kpage = pgaddr;
			if (!(*kpage)) {
				ehca_gen_err("pgaddr=%llx entry=%llx",
					     pgaddr, pginfo->u.usr.next_nmap);
				ret = -EFAULT;
				return ret;
			}
			/*
			 * The first page in a hwpage must be aligned;
			 * the first MR page is exempt from this rule.
			 */
			if (pgaddr & (pginfo->hwpage_size - 1)) {
				if (pginfo->hwpage_cnt) {
					ehca_gen_err(
						"invalid alignment "
						"pgaddr=%llx entry=%llx "
						"mr_pgsize=%llx",
						pgaddr, pginfo->u.usr.next_nmap,
						pginfo->hwpage_size);
					ret = -EFAULT;
					return ret;
				}
				/* first MR page */
				pginfo->kpage_cnt =
					(pgaddr &
					 (pginfo->hwpage_size - 1)) >>
					PAGE_SHIFT;
				nr_kpages -= pginfo->kpage_cnt;
				*kpage = pgaddr &
					 ~(pginfo->hwpage_size - 1);
			}
			if (ehca_debug_level >= 3) {
				u64 val = *(u64 *)__va(pgaddr);
				ehca_gen_dbg("kpage=%llx page=%llx "
					     "value=%016llx",
					     *kpage, pgaddr, val);
			}
			prev_pgaddr = pgaddr;
			*sg = sg_next(*sg);
			pginfo->kpage_cnt++;
			pginfo->u.usr.next_nmap++;
			nr_kpages--;
			if (!nr_kpages)
				goto next_kpage;
			continue;
		}

		ret = ehca_check_kpages_per_ate(sg, nr_kpages,
						&prev_pgaddr);
		if (ret)
			return ret;
		pginfo->kpage_cnt += nr_kpages;
		pginfo->u.usr.next_nmap += nr_kpages;

next_kpage:
		nr_kpages = kpages_per_hwpage;
		(pginfo->hwpage_cnt)++;
		kpage++;
		j++;
		if (j >= number)
			break;
	}

	return ret;
}

static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
				 u32 number, u64 *kpage)
{
	int ret = 0;
	u64 addr = pginfo->u.phy.addr;
	u64 size = pginfo->u.phy.size;
	u64 num_hw, offs_hw;
	u32 i = 0;

	num_hw  = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
			     pginfo->hwpage_size);
	offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;

	while (pginfo->next_hwpage < offs_hw + num_hw) {
		/* sanity check */
		if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
		    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
			ehca_gen_err("kpage_cnt >= num_kpages, "
				     "kpage_cnt=%llx num_kpages=%llx "
				     "hwpage_cnt=%llx "
				     "num_hwpages=%llx i=%x",
				     pginfo->kpage_cnt,
				     pginfo->num_kpages,
				     pginfo->hwpage_cnt,
				     pginfo->num_hwpages, i);
			return -EFAULT;
		}
		*kpage = (addr & ~(pginfo->hwpage_size - 1)) +
			 (pginfo->next_hwpage * pginfo->hwpage_size);
		if (!(*kpage) && addr) {
			ehca_gen_err("addr=%llx size=%llx "
				     "next_hwpage=%llx", addr,
				     size, pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		(pginfo->next_hwpage)++;
		if (PAGE_SIZE >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (PAGE_SIZE / pginfo->hwpage_size) == 0)
				(pginfo->kpage_cnt)++;
		} else
			pginfo->kpage_cnt += pginfo->hwpage_size /
				PAGE_SIZE;
		kpage++;
		i++;
		if (i >= number)
			break;
	}
	if (pginfo->next_hwpage >= offs_hw + num_hw)
		pginfo->next_hwpage = 0;

	return ret;
}

static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
				u32 number, u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
			   pginfo->next_hwpage * pginfo->hwpage_size;
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
				     "next_listelem=%llx next_hwpage=%llx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;
			/* check if adrs are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("uncontiguous fmr pages "
						     "found prev=%llx p=%llx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}

	return ret;
}

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */
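
/*
 * Summary of the dispatch above: EHCA_MR_PGI_PHYS walks a flat physical
 * range, EHCA_MR_PGI_USER walks the umem scatterlist (user1 when hw pages
 * fit into kernel pages, user2 when a hw page spans several kernel pages),
 * and EHCA_MR_PGI_FMR walks the caller-supplied FMR page list.
 */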

/*----------------------------------------------------------------------*/

/*
 * check MR if it is a max-MR, i.e. uses whole memory
 * in case it's a max-MR 1 is returned, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ehca_mr_len) &&
	    (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	kmem_cache_destroy(mr_cache);
	kmem_cache_destroy(mw_cache);
}

static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
				     int dir)
{
	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
		ehca_top_bmap->dir[dir] =
			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
		if (!ehca_top_bmap->dir[dir])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
	}
	return 0;
}

static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
{
	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
		ehca_bmap->top[top] =
			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
		if (!ehca_bmap->top[top])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
	}
	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
}

static inline int ehca_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHCA_INDEX_MASK;
}
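
/*
 * Illustrative use of ehca_calc_index(): for a section number i,
 * ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT) is the top index and
 * ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT) the dir index, while
 * i & EHCA_INDEX_MASK is the ent index; callers starting from a byte
 * address (ehca_map_vaddr()) add EHCA_SECTSHIFT to the shift.
 */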

void ehca_destroy_busmap(void)
{
	int top, dir;

	if (!ehca_bmap)
		return;

	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
				continue;

			kfree(ehca_bmap->top[top]->dir[dir]);
		}

		kfree(ehca_bmap->top[top]);
	}

	kfree(ehca_bmap);
	ehca_bmap = NULL;
}

static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i, start_section, end_section;
	int top, dir, idx;

	if (!nr_pages)
		return 0;

	if (!ehca_bmap) {
		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
		if (!ehca_bmap)
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
	}

	start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
	end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
	for (i = start_section; i < end_section; i++) {
		int ret;
		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
		idx = i & EHCA_INDEX_MASK;

		ret = ehca_init_bmap(ehca_bmap, top, dir);
		if (ret) {
			ehca_destroy_busmap();
			return ret;
		}
		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
		ehca_mr_len += EHCA_SECTSIZE;
	}
	return 0;
}
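
/*
 * Worked example (illustrative): registering a range that covers two
 * sections appends two entries; each valid ent[] slot stores the running
 * byte offset of its section in the emulated contiguous address space,
 * and ehca_mr_len grows by EHCA_SECTSIZE per section, ending up as the
 * total length covered by the internal max-MR.
 */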

static int ehca_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

static int ehca_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
		return ehca_update_busmap(initial_pfn, total_nr_pages);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehca_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehca_update_busmap(start_pfn, nr_pages);
			if (ret)
				return ret;
			/* Skip the hugepage */
			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehca_update_busmap(start_pfn, nr_pages);
}

int ehca_create_busmap(void)
{
	int ret;

	ehca_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehca_create_busmap_callback);
	return ret;
}

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo)
{
	int top;
	u64 hret = H_SUCCESS;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		return -ENOMEM;
	}
	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	ehca_free_fw_ctrlblock(kpage);

	if (hret == H_SUCCESS)
		return 0; /* Everything is fine */
	else {
		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
			 "h_ret=%lli e_mr=%p top=%x lkey=%x "
			 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
			 e_mr->ib.ib_mr.lkey,
			 shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle);
		return ehca2ib_return_code(hret);
	}
}

static u64 ehca_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long abs_addr, offset;
	u64 entry;

	if (!ehca_bmap)
		return EHCA_INVAL_ADDR;

	abs_addr = __pa(caddr);
	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]))
		return EHCA_INVAL_ADDR;

	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
		return EHCA_INVAL_ADDR;

	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);

	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
	if (ehca_bmap_valid(entry)) {
		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
		return entry | offset;
	} else
		return EHCA_INVAL_ADDR;
}
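
/*
 * Example translation (illustrative): for a mapped kernel address caddr,
 * the result is the section's busmap offset OR'ed with caddr's offset
 * within the section -- a compact pseudo bus address that remains dense
 * across memory holes skipped by the busmap.
 */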

static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == EHCA_INVAL_ADDR;
}

static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
			       size_t size, enum dma_data_direction direction)
{
	if (cpu_addr)
		return ehca_map_vaddr(cpu_addr);
	else
		return EHCA_INVAL_ADDR;
}

static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				  enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	u64 addr;

	if (offset + size > PAGE_SIZE)
		return EHCA_INVAL_ADDR;

	addr = ehca_map_vaddr(page_address(page));
	if (!ehca_dma_mapping_error(dev, addr))
		addr += offset;

	return addr;
}

static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		u64 addr;

		addr = ehca_map_vaddr(sg_virt(sg));
		if (ehca_dma_mapping_error(dev, addr))
			return 0;

		sg->dma_address = addr;
		sg->dma_length = sg->length;
	}
	return nents;
}

static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
			      int nents, enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
					 size_t size,
					 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
					    size_t size,
					    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
				     u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;
	u64 dma_addr;

	p = alloc_pages(flag, get_order(size));
	if (p) {
		addr = page_address(p);
		dma_addr = ehca_map_vaddr(addr);
		if (ehca_dma_mapping_error(dev, dma_addr)) {
			free_pages((unsigned long)addr, get_order(size));
			return NULL;
		}
		if (dma_handle)
			*dma_handle = dma_addr;
		return addr;
	}
	return NULL;
}

static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
				   void *cpu_addr, u64 dma_handle)
{
	if (cpu_addr && size)
		free_pages((unsigned long)cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
	.mapping_error          = ehca_dma_mapping_error,
	.map_single             = ehca_dma_map_single,
	.unmap_single           = ehca_dma_unmap_single,
	.map_page               = ehca_dma_map_page,
	.unmap_page             = ehca_dma_unmap_page,
	.map_sg                 = ehca_dma_map_sg,
	.unmap_sg               = ehca_dma_unmap_sg,
	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
	.sync_single_for_device = ehca_dma_sync_single_for_device,
	.alloc_coherent         = ehca_dma_alloc_coherent,
	.free_coherent          = ehca_dma_free_coherent,
};
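
/*
 * The ops above are deliberately thin: for this driver, "DMA mapping"
 * only means translating a kernel virtual address to its offset in the
 * busmap-defined address space via ehca_map_vaddr(), with EHCA_INVAL_ADDR
 * doubling as the mapping_error sentinel; the unmap callbacks are stubs.
 */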