/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"

static u32 nop_signature = 0x55550000;

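/*
 * Each padding NOP WQE built by i40iw_nop_1() below ORs this rolling value
 * into its header qword. The low dword of a NOP header falls outside the
 * defined header fields, so the 0x5555xxxx pattern appears to serve only as
 * a recognizable marker (and counter) when inspecting raw WQEs in a dump.
 */
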
/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
        u64 header, *wqe;
        u64 *wqe_0 = NULL;
        u32 wqe_idx, peek_head;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return I40IW_ERR_PARAM;

        wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

        peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
        wqe_0 = qp->sq_base[peek_head].elem;
        if (peek_head)
                wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        else
                wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
            LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

        wmb(); /* Memory barrier to ensure data is written before valid bit is set */

        set_64bit_val(wqe, 24, header);
        return 0;
}

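/*
 * i40iw_nop_1() is the padding primitive for i40iw_qp_get_next_send_wqe()
 * below: when a WQE would otherwise straddle a 128-byte boundary, enough
 * NOPs are written to fill the rest of the current 128-byte block so the
 * real WQE starts on the boundary. The caller advances the ring head; no
 * doorbell is rung here.
 */
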
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        mb(); /* valid bit is written and loads completed before reading shadow */

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
        sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        if (sw_sq_head != hw_sq_tail) {
                if (sw_sq_head > qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) &&
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                } else if (sw_sq_head != qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) ||
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}

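/*
 * Note on the doorbell decision above (illustrative reading):
 * initial_ring.head is the SQ head as of the last doorbell. The register is
 * written only when the hardware tail from the shadow area lies in the
 * wrap-around interval [initial_ring.head, sw_sq_head). The first branch is
 * the non-wrapped case; the "else if" is the wrapped case, where the
 * interval is [initial_ring.head, ring size) U [0, sw_sq_head), hence "||"
 * in place of "&&". A tail outside that interval means the hardware has not
 * yet caught up to the previously advertised head and will reach the new
 * WQEs without another doorbell.
 */
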
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
        qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}

/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: total length of the work request, recorded for completions
 * @wr_id: caller's work request id, recorded for completions
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
                                u32 *wqe_idx,
                                u8 wqe_size,
                                u32 total_size,
                                u64 wr_id)
{
        u64 *wqe = NULL;
        u64 wqe_ptr;
        u32 peek_head = 0;
        u16 offset;
        enum i40iw_status_code ret_code = 0;
        u8 nop_wqe_cnt = 0, i;
        u64 *wqe_0 = NULL;

        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
        offset = (u16)(wqe_ptr) & 0x7F;
        if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
                nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
                for (i = 0; i < nop_wqe_cnt; i++) {
                        i40iw_nop_1(qp);
                        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                        if (ret_code)
                                return NULL;
                }

                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
                i40iw_nop_1(qp);
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
        }

        wqe = qp->sq_base[*wqe_idx].elem;

        peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe_0 = qp->sq_base[peek_head].elem;

        if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
                if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
                        wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        }

        qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
        return wqe;
}

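/*
 * Summary of the adjustments made above (illustrative): (1) a WQE that would
 * cross a 128-byte boundary is preceded by NOP padding up to the boundary;
 * (2) a 64-byte WQE may not begin on the second 32-byte quantum of a
 * 128-byte block, so one extra NOP is inserted in that case; (3) the valid
 * bit of the slot following the allocation is pre-written with the inverted
 * polarity where needed, so stale ring contents are never mistaken for a
 * posted WQE.
 */
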
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
        if (sge) {
                set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
                set_64bit_val(wqe, (offset + 8),
                              (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
                               LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
        }
}

/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;
        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is no of qwords in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;
        return wqe;
}

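/*
 * Example of the multiplier arithmetic (illustrative): with rqshift = 1,
 * i40iw_qp_uk_init() sets rq_wqe_size_multiplier = 4 << 1 = 8 qwords, i.e.
 * a 64-byte RQ WQE. rq_base[] is an array of 32-byte quanta, so WQE n
 * starts at element n * (8 >> 2) = n * 2.
 */
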
/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
                                               struct i40iw_post_sq_info *info,
                                               bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
                return I40IW_ERR_QP_INVALID_MSG_SIZE;

        read_fence |= info->read_fence;

        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        if (!op_info->rem_addr.stag)
                return I40IW_ERR_BAD_STAG;

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

        for (i = 1; i < op_info->num_lo_sges; i++) {
                byte_off = 32 + (i - 1) * 16;
                i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

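/*
 * Illustrative sketch (not compiled): posting a one-SGE RDMA write through
 * the routine above. The wr_id and the local/remote SGE contents are
 * placeholders; a real caller takes them from registered memory regions.
 */
#if 0
static enum i40iw_status_code example_rdma_write(struct i40iw_qp_uk *qp,
                                                 struct i40iw_sge *local,
                                                 struct i40iw_sge *remote)
{
        struct i40iw_post_sq_info info = {};

        info.wr_id = 0x1234;                    /* echoed back at completion */
        info.signaled = true;                   /* request a CQE */
        info.op.rdma_write.lo_sg_list = local;  /* local source buffer */
        info.op.rdma_write.num_lo_sges = 1;
        info.op.rdma_write.rem_addr = *remote;  /* remote stag/tag_off */

        return i40iw_rdma_write(qp, &info, true); /* post and ring doorbell */
}
#endif
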
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: set to invalidate the local stag when the read completes
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
                                              struct i40iw_post_sq_info *info,
                                              bool inv_stag,
                                              bool post_sq)
{
        u64 *wqe;
        struct i40iw_rdma_read *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        u8 wqe_size;
        bool local_fence = false;

        op_info = &info->op.rdma_read;
        ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
        if (ret_code)
                return ret_code;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        local_fence |= info->local_fence;

        set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
                                         struct i40iw_post_sq_info *info,
                                         u32 stag_to_inv,
                                         bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_post_send *op_info;
        u32 i, wqe_idx, total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].len;
        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16, 0);
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->sg_list);

        for (i = 1; i < op_info->num_sges; i++) {
                byte_off = 32 + (i - 1) * 16;
                i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

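/*
 * Illustrative sketch (not compiled): a signaled two-SGE send. The op_type
 * value I40IW_OP_TYPE_SEND is assumed from i40iw_user.h; stag_to_inv is 0
 * for a plain send, and would carry the remote stag for a send-with-
 * invalidate op_type.
 */
#if 0
static enum i40iw_status_code example_send(struct i40iw_qp_uk *qp,
                                           struct i40iw_sge sges[2])
{
        struct i40iw_post_sq_info info = {};

        info.wr_id = 0x5678;
        info.signaled = true;
        info.op_type = I40IW_OP_TYPE_SEND;
        info.op.send.sg_list = sges;
        info.op.send.num_sges = 2;

        return i40iw_send(qp, &info, 0, true);
}
#endif
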
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
                                                      struct i40iw_post_sq_info *info,
                                                      bool post_sq)
{
        u64 header;
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_inline_rdma_write *op_info;
        u64 *push;
        u32 i, wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.inline_rdma_write;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                for (i = 0; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        } else {
                for (i = 0; i < 16; i++, src++, dest++)
                        *dest = *src;
                dest = (u8 *)wqe + 32;
                for (; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}

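/*
 * Push-mode note (as read from the code above): when a push page is mapped
 * (qp->push_db != NULL), the completed WQE is also copied into the push
 * page at offset (wqe_idx & 3) * 0x20 and the push doorbell is written in
 * place of the normal WQE-alloc doorbell, so the device can take the WQE
 * from the push page rather than DMA-reading it from host memory. The same
 * pattern appears in i40iw_inline_send() below.
 */
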
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
                                                struct i40iw_post_sq_info *info,
                                                u32 stag_to_inv,
                                                bool post_sq)
{
        u64 header;
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_post_inline_send *op_info;
        u64 *push;
        u32 i, wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.inline_send;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
            LS_64(info->op_type, I40IWQPSQ_OPCODE) |
            LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
            LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
            LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
            LS_64(read_fence, I40IWQPSQ_READFENCE) |
            LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
            LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                for (i = 0; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        } else {
                for (i = 0; i < 16; i++, src++, dest++)
                        *dest = *src;
                dest = (u8 *)wqe + 32;
                for (; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}

/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
                                                          struct i40iw_post_sq_info *info,
                                                          bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_inv_local_stag *op_info;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
        set_64bit_val(wqe, 16, 0);
        header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
            LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
            LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
            LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
                                            struct i40iw_post_sq_info *info,
                                            bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_bind_window *op_info;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.bind_window;

        local_fence |= info->local_fence;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
                      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
        set_64bit_val(wqe, 16, op_info->bind_length);
        header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
            LS_64(((op_info->enable_reads << 2) |
                   (op_info->enable_writes << 3)),
                  I40IWQPSQ_STAGRIGHTS) |
            LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
                  I40IWQPSQ_VABASEDTO) |
            LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
            LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
            LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
                                                 struct i40iw_post_rq_info *info)
{
        u64 header;
        u64 *wqe;
        u32 total_size = 0, wqe_idx, i, byte_off;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        for (i = 0; i < info->num_sges; i++)
                total_size += info->sg_list[i].len;
        wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        set_64bit_val(wqe, 16, 0);

        header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
            LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, info->sg_list);

        for (i = 1; i < info->num_sges; i++) {
                byte_off = 32 + (i - 1) * 16;
                i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        return 0;
}

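/*
 * Illustrative sketch (not compiled): replenishing one receive buffer. The
 * wr_id and the SGE contents are placeholders for a registered MR.
 */
#if 0
static enum i40iw_status_code example_post_recv(struct i40iw_qp_uk *qp,
                                                struct i40iw_sge *buf)
{
        struct i40iw_post_rq_info info = {};

        info.wr_id = 0x9abc;    /* reported when the receive completes */
        info.sg_list = buf;
        info.num_sges = 1;

        return i40iw_post_receive(qp, &info);
}
#endif
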
/**
 * i40iw_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
                                          enum i40iw_completion_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
        arm_next_se |= 1;
        if (cq_notify == IW_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
            LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
            LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
            LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        wmb(); /* make sure WQE is populated before valid bit is set */

        writel(cq->cq_id, cq->cqe_alloc_reg);
}

/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
                                                    u8 count)
{
        I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
        set_64bit_val(cq->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        return 0;
}

/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 * @post_cq: update cq tail
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
                                                       struct i40iw_cq_poll_info *info,
                                                       bool post_cq)
{
        u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
        u64 *cqe, *sw_wqe;
        struct i40iw_qp_uk *qp;
        struct i40iw_ring *pring = NULL;
        u32 wqe_idx, q_type, array_idx = 0;
        enum i40iw_status_code ret_code = 0;
        enum i40iw_status_code ret_code2 = 0;
        bool move_cq_head = true;
        u8 polarity;
        u8 addl_wqes = 0;

        if (cq->avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

        if (polarity != cq->polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
        info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
        info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
        if (info->error) {
                info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
                info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
                info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
        } else {
                info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);

        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
        info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

        qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
        if (!qp) {
                ret_code = I40IW_ERR_QUEUE_DESTROYED;
                goto exit;
        }
        wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
        info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

        if (q_type == I40IW_CQE_QTYPE_RQ) {
                array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
                if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->op_type = I40IW_OP_TYPE_REC;
                if (qword3 & I40IWCQ_STAG_MASK) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
                } else {
                        info->stag_invalid_set = false;
                }
                info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
                I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                pring = &qp->rq_ring;
        } else {
                if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

                        info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
                        sw_wqe = qp->sq_base[wqe_idx].elem;
                        get_64bit_val(sw_wqe, 24, &wqe_qword);

                        addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                        I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
                } else {
                        do {
                                u8 op_type;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24, &wqe_qword);
                                op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
                                info->op_type = op_type;
                                addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                                I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
                                if (op_type != I40IWQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code &&
            (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
                if (pring && (I40IW_RING_MORE_WORK(*pring)))
                        move_cq_head = false;

        if (move_cq_head) {
                I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);

                if (ret_code2 && !ret_code)
                        ret_code = ret_code2;

                if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
                        cq->polarity ^= 1;

                if (post_cq) {
                        I40IW_RING_MOVE_TAIL(cq->cq_ring);
                        set_64bit_val(cq->shadow_area, 0,
                                      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
                }
        } else {
                qword3 &= ~I40IW_CQ_WQEIDX_MASK;
                qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}

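/*
 * Illustrative sketch (not compiled): draining a CQ with the routine above.
 * Completions are polled until I40IW_ERR_QUEUE_EMPTY; each success carries
 * the wr_id given at post time. handle_flush() and handle_completion() are
 * hypothetical caller-supplied helpers, not part of this library.
 */
#if 0
static void example_drain_cq(struct i40iw_cq_uk *cq)
{
        struct i40iw_cq_poll_info info;

        while (!i40iw_cq_poll_completion(cq, &info, true)) {
                if (info.error)
                        handle_flush(info.qp_id, info.major_err, info.minor_err);
                else
                        handle_completion(info.wr_id, info.bytes_xfered);
        }
}
#endif
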
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @wqdepth: depth of wq required.
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
{
        u32 size;

        *shift = 0;
        if (sge > 1 || inline_data > 16)
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;

        /* wqdepth must be a power of 2 and at least I40IWQP_SW_MIN_WQSIZE */
        if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
                return I40IW_ERR_INVALID_SIZE;

        size = wqdepth << *shift;       /* multiple of 32 bytes count */
        if (size > I40IWQP_SW_MAX_WQSIZE)
                return I40IW_ERR_INVALID_SIZE;
        return 0;
}

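/*
 * Worked example (illustrative): sge = 3, inline_data = 0 gives *shift = 1,
 * so a 16-deep SQ occupies 16 << 1 = 32 ring slots of 32 bytes each
 * (64-byte WQEs). sge = 1 with inline_data = 16 leaves *shift = 0 (32-byte
 * WQEs); anything larger falls through to shift = 2 (128-byte WQEs).
 */
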
static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
        i40iw_qp_post_wr,
        i40iw_qp_ring_push_db,
        i40iw_rdma_write,
        i40iw_rdma_read,
        i40iw_send,
        i40iw_inline_rdma_write,
        i40iw_inline_send,
        i40iw_stag_local_invalidate,
        i40iw_mw_bind,
        i40iw_post_receive,
        i40iw_nop
};

static struct i40iw_cq_ops iw_cq_ops = {
        i40iw_cq_request_notification,
        i40iw_cq_poll_completion,
        i40iw_cq_post_entries,
        i40iw_clean_cq
};

static struct i40iw_device_uk_ops iw_device_uk_ops = {
        i40iw_cq_uk_init,
        i40iw_qp_uk_init,
};

/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on number of max. fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
                                        struct i40iw_qp_uk_init_info *info)
{
        enum i40iw_status_code ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
        if (ret_code)
                return ret_code;

        ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
        if (ret_code)
                return ret_code;

        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;
        qp->rq_wrid_array = info->rq_wrid_array;

        qp->wqe_alloc_reg = info->wqe_alloc_reg;
        qp->qp_id = info->qp_id;

        qp->sq_size = info->sq_size;
        qp->push_db = info->push_db;
        qp->push_wqe = info->push_wqe;

        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;

        I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
        I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        I40IW_RING_MOVE_TAIL(qp->sq_ring);
        I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
        qp->swqe_polarity = 1;
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;

        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
                qp->rq_wqe_size = rqshift;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;

        return ret_code;
}

/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
                                        struct i40iw_cq_uk_init_info *info)
{
        if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
            (info->cq_size > I40IW_MAX_CQ_SIZE))
                return I40IW_ERR_INVALID_SIZE;
        cq->cq_base = (struct i40iw_cqe *)info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_reg = info->cqe_alloc_reg;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;

        cq->polarity = 1;
        I40IW_RING_INIT(cq->cq_ring, cq->cq_size);

        cq->ops = iw_cq_ops;

        return 0;
}

/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
        dev->ops_uk = iw_device_uk_ops;
}

/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
        u64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
                else
                        cqe = (u64 *)&cq->cq_base[cq_head];
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == queue)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}

/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
                                 u64 wr_id,
                                 bool signaled,
                                 bool post_sq)
{
        u64 header, *wqe;
        u32 wqe_idx;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
            LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
                *wqe_size = 96;
                break;
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}

/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
                                                         u8 *wqe_size)
{
        if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        if (data_size <= 16)
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
        else if (data_size <= 48)
                *wqe_size = 64;
        else if (data_size <= 80)
                *wqe_size = 96;
        else
                *wqe_size = 128;

        return 0;
}

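/*
 * Resulting inline-size to WQE-size mapping (illustrative summary):
 *   0..16 bytes   -> 32-byte WQE (I40IW_QP_WQE_MIN_SIZE)
 *   17..48 bytes  -> 64-byte WQE
 *   49..80 bytes  -> 96-byte WQE
 *   81 bytes up to I40IW_MAX_INLINE_DATA_SIZE -> 128-byte WQE
 */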