/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

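/*
 * MTT segments are managed with a binary buddy allocator: bits[o] tracks
 * free blocks of 2^o segments and num_free[o] counts them. Allocation takes
 * the smallest free block of at least the requested order, splits it as
 * needed, and returns the first segment index of the block in *seg.
 */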
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);
	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order */
	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}
	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

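/*
 * Free a block of 2^order segments and repeatedly merge it with its free
 * buddy into a block of the next higher order.
 */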
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];
	spin_unlock(&buddy->lock);
}

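/*
 * Initialise the buddy bitmaps. Each per-order bitmap falls back to
 * vzalloc() when kcalloc() fails, and initially the whole range is a
 * single free block of the maximum order.
 */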
static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
		return -1;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret = 0;
	int i;

	/* A page count of zero corresponds to a DMA memory registration */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: if page_shift is zero, this is a fast memory registration */
	mtt->page_shift = page_shift;

	/* Compute the number of MTT entries needed */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate MTT entries */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
				 mtt->first_seg + (1 << mtt->order) - 1);
}

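/*
 * Allocate an MTPT index for the MR and, for a normal (non-DMA) MR, a
 * coherent PBL buffer that will hold the page addresses of the region.
 */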
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	unsigned long index = 0;
	int ret = 0;
	struct device *dev = &hr_dev->pdev->dev;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* MR num */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;
	}

	return 0;
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

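/*
 * Write one chunk of MTT entries; the chunk may not cross a page boundary
 * of the MTT table and must start on a segment boundary.
 */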
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	u32 i = 0;
	u64 *mtts = NULL;
	dma_addr_t dma_handle;
	u32 s = start_index * sizeof(u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save the page addresses; the low 12 bits are zero */
	for (i = 0; i < npages; ++i)
		mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;

	return 0;
}

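/* Write an arbitrary run of MTT entries, one page-sized chunk at a time. */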
static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;

	if (mtt->order < 0)
		return -EINVAL;

	while (npages > 0) {
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u32 i = 0;
	int ret = 0;
	u64 *page_list = NULL;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}
	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret = 0;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	return 0;

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

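/* Register a DMA MR covering the whole address space (size ~0ULL, no PBL). */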
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	int ret = 0;
	struct hns_roce_mr *mr = NULL;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

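/*
 * Walk the umem scatterlist and write the DMA addresses of its pages into
 * the MTT, batching up to one kernel page of 64-bit entries per write.
 */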
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i, k, entry;
	int ret = 0;
	u64 *pages;
	u32 n;
	int len;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				     (k << umem->page_shift);
			if (i == PAGE_SIZE / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return ret;
}

static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	int i = 0;
	int entry;
	struct scatterlist *sg;

	/* Fill the PBL with the 4K-aligned DMA addresses of the umem pages */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
		i++;
	}

	/* Memory barrier */
	mb();

	return 0;
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr *mr = NULL;
	int ret = 0;
	int n = 0;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
		dev_err(dev, "Only 4K page size is supported, but got 0x%lx!\n",
			BIT(mr->umem->page_shift));
		ret = -EINVAL;
		goto err_umem;
	}

	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
		dev_err(dev, "MR len %lld error, MR is limited to 4G at most!\n",
			length);
		ret = -EINVAL;
		goto err_umem;
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

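/*
 * Deregistration is delegated to the hardware-specific dereg_mr hook when
 * one is provided; otherwise the MR is torn down directly here.
 */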
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}