/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/kfifo.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printk(MOD fmt, ## args); \
} while (0)
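
/*
 * A minimal usage sketch (hypothetical call site): PDBG is the driver's
 * debug-trace macro, gated on the c4iw_debug module parameter, so a
 * disabled trace costs only one predictable branch:
 *
 *      PDBG("%s qpid 0x%x\n", __func__, qpid);
 */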

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

struct c4iw_resource {
        struct kfifo tpt_fifo;
        spinlock_t tpt_fifo_lock;
        struct kfifo qid_fifo;
        spinlock_t qid_fifo_lock;
        struct kfifo pdid_fifo;
        spinlock_t pdid_fifo_lock;
};
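
/*
 * A minimal sketch of how this allocator is used (see the
 * c4iw_get_resource()/c4iw_put_resource() prototypes below): each kfifo
 * is pre-loaded with the available IDs at init time, so allocation is
 * just a locked dequeue, e.g.
 *
 *      stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
 *                                   &rdev->resource.tpt_fifo_lock);
 */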

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1 << 0),
};

struct c4iw_rdev {
        struct c4iw_resource resource;
        unsigned long qpshift;
        u32 qpmask;
        unsigned long cqshift;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
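
/*
 * Note on the shift above (an inference, not stated in this file): each
 * TPT entry occupies 32 bytes, so dividing the stag region size by 32
 * (size >> 5) yields the number of stags the region can hold.
 */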

#define C4IW_WR_TO (10*HZ)

struct c4iw_wr_wait {
        wait_queue_head_t wait;
        int done;
        int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        wr_waitp->done = 0;
        init_waitqueue_head(&wr_waitp->wait);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                      struct c4iw_wr_wait *wr_waitp,
                                      u32 hwtid, u32 qpid,
                                      const char *func)
{
        unsigned to = C4IW_WR_TO;

        do {
                wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
                if (!wr_waitp->done) {
                        printk(KERN_ERR MOD "%s - Device %s not responding - "
                               "tid %u qpid %u\n", func,
                               pci_name(rdev->lldi.pdev), hwtid, qpid);
                        to = to << 2;
                }
        } while (!wr_waitp->done);
        if (wr_waitp->ret)
                printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
                       pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
        return wr_waitp->ret;
}
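
/*
 * A minimal usage sketch (illustrative, not a verbatim call site): the
 * caller initializes a c4iw_wr_wait, posts a firmware work request whose
 * CPL reply handler fills in ->ret, sets ->done and wakes ->wait, then
 * blocks here for the answer:
 *
 *      struct c4iw_wr_wait wr_wait;
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ret = c4iw_ofld_send(rdev, skb);
 *      if (!ret)
 *              ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, qid, __func__);
 */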

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct list_head entry;
        struct delayed_work db_drop_task;
        struct dentry *debugfs_root;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        int ret;
        int newid;

        do {
                if (!idr_pre_get(idr, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock_irq(&rhp->lock);
                ret = idr_get_new_above(idr, handle, id, &newid);
                BUG_ON(newid != id);
                spin_unlock_irq(&rhp->lock);
        } while (ret == -EAGAIN);

        return ret;
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        spin_unlock_irq(&rhp->lock);
}
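
/*
 * A minimal sketch of how these pair up (field names are illustrative):
 * objects are published under their hardware IDs at create time so that
 * interrupt-path code can find them via get_qhp()/get_chp()/get_mhp():
 *
 *      ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *      ...
 *      remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 */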

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
        struct ib_fast_reg_page_list ibpl;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        dma_addr_t dma_addr;
        struct c4iw_dev *dev;
        int size;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
                                        struct ib_fast_reg_page_list *ibpl)
{
        return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct t4_cq cq;
        spinlock_t lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        atomic_t refcnt;
        wait_queue_head_t wait;
        struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
                             key, (unsigned long long) mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
             mm->key, (unsigned long long) mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
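
/*
 * A minimal sketch of the pattern these helpers implement (field values
 * are illustrative): a verb that exports queue or doorbell memory to
 * userspace records a (key, addr, len) triple and hands the key back in
 * its response:
 *
 *      mm->key = uresp.key;
 *      mm->addr = virt_to_phys(qhp->wq.sq.queue);
 *      mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
 *      insert_mmap(ucontext, mm);
 *
 * and the driver's mmap handler later claims the entry by key:
 *
 *      mm = remove_mmap(ucontext, key, len);
 */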

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                   struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal);
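
/*
 * An illustrative call (loosely modeled on the connection-accept path,
 * not a verbatim call site): driving a QP to RTS sets several
 * attributes under one mask:
 *
 *      attrs.next_state = C4IW_QP_STATE_RTS;
 *      attrs.mpa_attr = ep->mpa_attr;
 *      attrs.llp_stream_handle = ep;
 *      mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_MPA_ATTR |
 *             C4IW_QP_ATTR_LLP_STREAM_HANDLE;
 *      ret = c4iw_modify_qp(rhp, qhp, mask, &attrs, 1);
 */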

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
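
/*
 * A worked example (assuming the FW_RI_MEM_ACCESS_* values are distinct
 * bits): registering an MR with IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_WRITE yields FW_RI_MEM_ACCESS_LOCAL_WRITE |
 * FW_RI_MEM_ACCESS_REM_WRITE | FW_RI_MEM_ACCESS_LOCAL_READ; local read
 * permission is always granted, while bind access only ever carries the
 * two remote bits.
 */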

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA 256
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0

#define c4iw_put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
             ep, atomic_read(&((ep)->kref.refcount))); \
        WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        kref_get(&((ep)->kref));  \
}
void _c4iw_free_ep(struct kref *kref);
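
/*
 * A minimal sketch of the refcount discipline (illustrative): any path
 * that stashes an endpoint pointer or arms its timer takes a reference
 * first, and _c4iw_free_ep() runs only when the last one drops:
 *
 *      c4iw_get_ep(&ep->com);
 *      ...
 *      c4iw_put_ep(&ep->com);
 */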

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
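
/*
 * On the size above (an inference from the TERMINATE format, not spelled
 * out here): the 2 + 18 + 28 tail appears to reserve room for the
 * terminated-DDP-segment length field plus the largest DDP and RDMAP
 * headers that a TERMINATE message may echo back to the peer.
 */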

enum c4iw_layers_types {
        LAYER_RDMAP = 0x00,
        LAYER_DDP = 0x10,
        LAYER_MPA = 0x20,
        RDMAP_LOCAL_CATA = 0x00,
        RDMAP_REMOTE_PROT = 0x01,
        RDMAP_REMOTE_OP = 0x02,
        DDP_LOCAL_CATA = 0x00,
        DDP_TAGGED_ERR = 0x01,
        DDP_UNTAGGED_ERR = 0x02,
        DDP_LLP = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG = 0x00,
        RDMAP_BASE_BOUNDS = 0x01,
        RDMAP_ACC_VIOL = 0x02,
        RDMAP_STAG_NOT_ASSOC = 0x03,
        RDMAP_TO_WRAP = 0x04,
        RDMAP_INV_VERS = 0x05,
        RDMAP_INV_OPCODE = 0x06,
        RDMAP_STREAM_CATA = 0x07,
        RDMAP_GLOBAL_CATA = 0x08,
        RDMAP_CANT_INV_STAG = 0x09,
        RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG = 0x00,
        DDPT_BASE_BOUNDS = 0x01,
        DDPT_STAG_NOT_ASSOC = 0x02,
        DDPT_TO_WRAP = 0x03,
        DDPT_INV_VERS = 0x04,
        DDPU_INV_QN = 0x01,
        DDPU_INV_MSN_NOBUF = 0x02,
        DDPU_INV_MSN_RANGE = 0x03,
        DDPU_INV_MO = 0x04,
        DDPU_MSG_TOOBIG = 0x05,
        DDPU_INV_VERS = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR = 0x02,
        MPA_MARKER_ERR = 0x03
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS = 0,
        ABORT_REQ_IN_PROGRESS = 1,
        RELEASE_RESOURCES = 2,
        CLOSE_SENT = 3,
};
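
/*
 * These are bit numbers, not masks: the CM code tests and sets them
 * atomically on the endpoint's flags word, e.g. (illustrative)
 *
 *      set_bit(RELEASE_RESOURCES, &ep->com.flags);
 *      if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
 *              ...
 */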

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_in local_addr;
        struct sockaddr_in remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}
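
/*
 * A worked example: for win = 262144 (a 256KB advertised window),
 * 65535 << 2 = 262140 is still smaller than win, so the loop stops at
 * wscale = 3 - the smallest TCP window-scale shift (capped at 14, per
 * RFC 1323) that lets the 16-bit window field cover the requested size.
 */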

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
                                        struct ib_device *device,
                                        int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                               u64 length, u64 virt, int acc,
                               struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
                                     struct ib_phys_buf *buffer_list,
                                     int num_phys_buf,
                                     int acc,
                                     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
                             int mr_rereg_mask,
                             struct ib_pd *pd,
                             struct ib_phys_buf *buffer_list,
                             int num_phys_buf,
                             int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;

#endif