/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for IB environment
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <Ursula Braun@linux.vnet.ibm.com>
 */

#ifndef _SMC_IB_H
#define _SMC_IB_H

#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <rdma/ib_verbs.h>

#define SMC_MAX_PORTS			2	/* Max # of ports */
#define SMC_GID_SIZE			sizeof(union ib_gid)

#define SMC_IB_MAX_SEND_SGE		2

struct smc_ib_devices {			/* list of smc ib devices definition */
	struct list_head	list;
	spinlock_t		lock;	/* protects list of smc ib devices */
};

extern struct smc_ib_devices	smc_ib_devices; /* list of smc ib devices */

struct smc_ib_device {				/* ib-device infos for smc */
	struct list_head	list;
	struct ib_device	*ibdev;
	struct ib_port_attr	pattr[SMC_MAX_PORTS];	/* ib dev. port attrs */
	struct ib_event_handler	event_handler;	/* global ib_event handler */
	struct ib_cq		*roce_cq_send;	/* send completion queue */
	struct ib_cq		*roce_cq_recv;	/* recv completion queue */
	struct tasklet_struct	send_tasklet;	/* called by send cq handler */
	struct tasklet_struct	recv_tasklet;	/* called by recv cq handler */
	char			mac[SMC_MAX_PORTS][ETH_ALEN];
						/* mac address per port*/
	union ib_gid		gid[SMC_MAX_PORTS]; /* gid per port */
	u8			initialized : 1; /* ib dev CQ, evthdl done */
	struct work_struct	port_event_work;
	unsigned long		port_event_mask;
};

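/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper (the name smc_ib_example_count_active_ports is made up for this
 * example) showing how a consumer might walk the global smc_ib_devices
 * list. It follows the comment on smc_ib_devices.lock, which protects
 * the list, and only reads fields that struct smc_ib_device provides.
 */
static inline int smc_ib_example_count_active_ports(void)
{
	struct smc_ib_device *smcibdev;
	int ports = 0;
	u8 ibport;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		if (!smcibdev->initialized)
			continue;	/* CQs/event handler not set up yet */
		/* pattr[] caches one ib_port_attr per port (0-based here) */
		for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++)
			if (smcibdev->pattr[ibport].state == IB_PORT_ACTIVE)
				ports++;
	}
	spin_unlock(&smc_ib_devices.lock);
	return ports;
}
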
struct smc_buf_desc;
struct smc_link;

int smc_ib_register_client(void) __init;
void smc_ib_unregister_client(void);
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
		   struct smc_buf_desc *buf_slot,
		   enum dma_data_direction data_direction);
void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction);
void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
int smc_ib_create_protection_domain(struct smc_link *lnk);
void smc_ib_destroy_queue_pair(struct smc_link *lnk);
int smc_ib_create_queue_pair(struct smc_link *lnk);
int smc_ib_ready_link(struct smc_link *lnk);
int smc_ib_modify_qp_rts(struct smc_link *lnk);
int smc_ib_modify_qp_reset(struct smc_link *lnk);
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);

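/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * bring-up sequence (smc_ib_example_setup_link is a made-up name) showing
 * how the per-link wrappers above pair up, including error unwinding.
 * The real call order lives in the SMC core code, not here.
 */
static inline int smc_ib_example_setup_link(struct smc_link *lnk)
{
	int rc;

	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		return rc;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_ib_ready_link(lnk);	/* bring the QP to a usable state */
	if (rc)
		goto destroy_qp;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
	return rc;
}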

#endif