1 /*******************************************************************************
3 * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 *******************************************************************************/
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
40 #include <linux/tcp.h>
41 #include <linux/if_vlan.h>
42 #include <net/addrconf.h>
45 #include "i40iw_register.h"
46 #include <net/netevent.h>
47 #define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
48 #define CLIENT_IW_INTERFACE_VERSION_MINOR 01
49 #define CLIENT_IW_INTERFACE_VERSION_BUILD 00
51 #define DRV_VERSION_MAJOR 0
52 #define DRV_VERSION_MINOR 5
53 #define DRV_VERSION_BUILD 123
54 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
55 __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
65 static int resource_profile;
66 module_param(resource_profile, int, 0644);
67 MODULE_PARM_DESC(resource_profile,
68 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");
70 static int max_rdma_vfs = 32;
71 module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 (default 32)");
73 static int mpa_version = 2;
74 module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp: 1 or 2 (default 2)");
77 MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
78 MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
79 MODULE_LICENSE("Dual BSD/GPL");
80 MODULE_VERSION(DRV_VERSION);
82 static struct i40e_client i40iw_client;
83 static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
85 static LIST_HEAD(i40iw_handlers);
86 static spinlock_t i40iw_handler_lock;
88 static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
89 u32 vf_id, u8 *msg, u16 len);
91 static struct notifier_block i40iw_inetaddr_notifier = {
92 .notifier_call = i40iw_inetaddr_event
95 static struct notifier_block i40iw_inetaddr6_notifier = {
96 .notifier_call = i40iw_inet6addr_event
99 static struct notifier_block i40iw_net_notifier = {
100 .notifier_call = i40iw_net_event
103 static atomic_t i40iw_notifiers_registered;
106 * i40iw_find_i40e_handler - find a handler given a client info
107 * @ldev: pointer to a client info
109 static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
111 struct i40iw_handler *hdl;
114 spin_lock_irqsave(&i40iw_handler_lock, flags);
115 list_for_each_entry(hdl, &i40iw_handlers, list) {
116 if (hdl->ldev.netdev == ldev->netdev) {
117 spin_unlock_irqrestore(&i40iw_handler_lock, flags);
121 spin_unlock_irqrestore(&i40iw_handler_lock, flags);
126 * i40iw_find_netdev - find a handler given a netdev
127 * @netdev: pointer to net_device
129 struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
131 struct i40iw_handler *hdl;
134 spin_lock_irqsave(&i40iw_handler_lock, flags);
135 list_for_each_entry(hdl, &i40iw_handlers, list) {
136 if (hdl->ldev.netdev == netdev) {
137 spin_unlock_irqrestore(&i40iw_handler_lock, flags);
141 spin_unlock_irqrestore(&i40iw_handler_lock, flags);
146 * i40iw_add_handler - add a handler to the list
147 * @hdl: handler to be added to the handler list
149 static void i40iw_add_handler(struct i40iw_handler *hdl)
153 spin_lock_irqsave(&i40iw_handler_lock, flags);
154 list_add(&hdl->list, &i40iw_handlers);
155 spin_unlock_irqrestore(&i40iw_handler_lock, flags);
159 * i40iw_del_handler - delete a handler from the list
160 * @hdl: handler to be deleted from the handler list
162 static int i40iw_del_handler(struct i40iw_handler *hdl)
166 spin_lock_irqsave(&i40iw_handler_lock, flags);
167 list_del(&hdl->list);
168 spin_unlock_irqrestore(&i40iw_handler_lock, flags);
173 * i40iw_enable_intr - set up device interrupts
174 * @dev: hardware control device structure
175 * @msix_id: id of the interrupt to be enabled
177 static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
181 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
182 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
183 (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
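	/* ITR index 3 selects the no-ITR setting, and the DYN_CTLN registers
	 * start at MSI-X vector 1 (vector 0 has a dedicated register), hence
	 * the msix_id - 1 indexing below.
	 */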
185 i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
187 i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
191 * i40iw_dpc - tasklet for aeq and ceq 0
192 * @data: iwarp device
194 static void i40iw_dpc(unsigned long data)
196 struct i40iw_device *iwdev = (struct i40iw_device *)data;
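	/* when the AEQ shares MSI-X vector 0 with CEQ 0, this tasklet
	 * services both queues before re-enabling the interrupt
	 */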
198 if (iwdev->msix_shared)
199 i40iw_process_ceq(iwdev, iwdev->ceqlist);
200 i40iw_process_aeq(iwdev);
201 i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
205 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: pointer to the CEQ
208 static void i40iw_ceq_dpc(unsigned long data)
210 struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
211 struct i40iw_device *iwdev = iwceq->iwdev;
213 i40iw_process_ceq(iwdev, iwceq);
214 i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
218 * i40iw_irq_handler - interrupt handler for aeq and ceq0
219 * @irq: Interrupt request number
220 * @data: iwarp device
222 static irqreturn_t i40iw_irq_handler(int irq, void *data)
224 struct i40iw_device *iwdev = (struct i40iw_device *)data;
226 tasklet_schedule(&iwdev->dpc_tasklet);
231 * i40iw_destroy_cqp - destroy control qp
232 * @iwdev: iwarp device
 * @free_hwcqp: true if the CQP create completed and the hardware CQP should be destroyed
235 * Issue destroy cqp request and
236 * free the resources associated with the cqp
238 static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
240 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
241 struct i40iw_cqp *cqp = &iwdev->cqp;
244 dev->cqp_ops->cqp_destroy(dev->cqp);
246 i40iw_free_dma_mem(dev->hw, &cqp->sq);
247 kfree(cqp->scratch_array);
248 iwdev->cqp.scratch_array = NULL;
250 kfree(cqp->cqp_requests);
251 cqp->cqp_requests = NULL;
255 * i40iw_disable_irqs - disable device interrupts
256 * @dev: hardware control device structure
257 * @msic_vec: msix vector to disable irq
258 * @dev_id: parameter to pass to free_irq (used during irq setup)
260 * The function is called when destroying aeq/ceq
262 static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
263 struct i40iw_msix_vector *msix_vec,
267 i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
269 i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
270 irq_set_affinity_hint(msix_vec->irq, NULL);
271 free_irq(msix_vec->irq, dev_id);
275 * i40iw_destroy_aeq - destroy aeq
276 * @iwdev: iwarp device
278 * Issue a destroy aeq request and
279 * free the resources associated with the aeq
280 * The function is called during driver unload
282 static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
284 enum i40iw_status_code status = I40IW_ERR_NOT_READY;
285 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
286 struct i40iw_aeq *aeq = &iwdev->aeq;
288 if (!iwdev->msix_shared)
289 i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
293 if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
294 status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
296 i40iw_pr_err("destroy aeq failed %d\n", status);
299 i40iw_free_dma_mem(dev->hw, &aeq->mem);
303 * i40iw_destroy_ceq - destroy ceq
304 * @iwdev: iwarp device
305 * @iwceq: ceq to be destroyed
307 * Issue a destroy ceq request and
308 * free the resources associated with the ceq
310 static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
311 struct i40iw_ceq *iwceq)
313 enum i40iw_status_code status;
314 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
319 status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
321 i40iw_pr_err("ceq destroy command failed %d\n", status);
325 status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
327 i40iw_pr_err("ceq destroy completion failed %d\n", status);
329 i40iw_free_dma_mem(dev->hw, &iwceq->mem);
333 * i40iw_dele_ceqs - destroy all ceq's
334 * @iwdev: iwarp device
336 * Go through all of the device ceq's and for each ceq
337 * disable the ceq interrupt and destroy the ceq
339 static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
342 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
343 struct i40iw_ceq *iwceq = iwdev->ceqlist;
344 struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
346 if (iwdev->msix_shared) {
347 i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
348 i40iw_destroy_ceq(iwdev, iwceq);
353 for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
354 i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
355 i40iw_destroy_ceq(iwdev, iwceq);
360 * i40iw_destroy_ccq - destroy control cq
361 * @iwdev: iwarp device
363 * Issue destroy ccq request and
364 * free the resources associated with the ccq
366 static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
368 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
369 struct i40iw_ccq *ccq = &iwdev->ccq;
370 enum i40iw_status_code status = 0;
373 status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
375 i40iw_pr_err("ccq destroy failed %d\n", status);
376 i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
379 /* types of hmc objects */
380 static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
385 I40IW_HMC_IW_APBVT_ENTRY,
395 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info structure
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
401 static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
402 enum i40iw_hmc_rsrc_type obj_type,
403 struct i40iw_hmc_info *hmc_info,
407 struct i40iw_hmc_del_obj_info info;
409 memset(&info, 0, sizeof(info));
410 info.hmc_info = hmc_info;
411 info.rsrc_type = obj_type;
412 info.count = hmc_info->hmc_obj[obj_type].cnt;
414 if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
415 i40iw_pr_err("del obj of type %d failed\n", obj_type);
419 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: hardware control device structure
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to the PF and was not allocated
 *         by the PF on behalf of a VF
424 * @reset: true if called before reset
426 static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
427 struct i40iw_hmc_info *hmc_info,
433 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
434 i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: pointer to the ceq
441 static irqreturn_t i40iw_ceq_handler(int irq, void *data)
443 struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
445 if (iwceq->irq != irq)
446 i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
447 tasklet_schedule(&iwceq->dpc_tasklet);
452 * i40iw_create_hmc_obj_type - create hmc object of a given type
453 * @dev: hardware control device structure
454 * @info: information for the hmc object to create
456 static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
457 struct i40iw_hmc_create_obj_info *info)
459 return dev->hmc_ops->create_hmc_object(dev, info);
463 * i40iw_create_hmc_objs - create all hmc objects for the device
464 * @iwdev: iwarp device
465 * @is_pf: true if the function is PF otherwise false
467 * Create the device hmc objects and allocate hmc pages
468 * Return 0 if successful, otherwise clean up and return error
470 static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
473 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
474 struct i40iw_hmc_create_obj_info info;
475 enum i40iw_status_code status;
478 memset(&info, 0, sizeof(info));
479 info.hmc_info = dev->hmc_info;
481 info.entry_type = iwdev->sd_type;
482 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
483 info.rsrc_type = iw_hmc_obj_types[i];
484 info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
485 status = i40iw_create_hmc_obj_type(dev, &info);
487 i40iw_pr_err("create obj type %d status = %d\n",
488 iw_hmc_obj_types[i], status);
493 return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
499 /* destroy the hmc objects of a given type */
500 i40iw_close_hmc_objects_type(dev,
510 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
511 * @iwdev: iwarp device
 * @memptr: dma memory descriptor to be filled with the aligned address
513 * @size: size of memory needed
514 * @mask: mask for the aligned memory
516 * Get aligned memory of the requested size and
517 * update the memptr to point to the new aligned memory
518 * Return 0 if successful, otherwise return no memory error
520 enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
521 struct i40iw_dma_mem *memptr,
unsigned long va, newva, extra;

va = (unsigned long)iwdev->obj_next.va;
newva = ALIGN(va, (mask + 1));
extra = newva - va;
533 memptr->va = (u8 *)va + extra;
534 memptr->pa = iwdev->obj_next.pa + extra;
536 if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
537 return I40IW_ERR_NO_MEMORY;
539 iwdev->obj_next.va = memptr->va + size;
540 iwdev->obj_next.pa = memptr->pa + size;
545 * i40iw_create_cqp - create control qp
546 * @iwdev: iwarp device
548 * Return 0, if the cqp and all the resources associated with it
549 * are successfully created, otherwise return error
551 static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
553 enum i40iw_status_code status;
554 u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
555 struct i40iw_dma_mem mem;
556 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
557 struct i40iw_cqp_init_info cqp_init_info;
558 struct i40iw_cqp *cqp = &iwdev->cqp;
559 u16 maj_err, min_err;
562 cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
563 if (!cqp->cqp_requests)
564 return I40IW_ERR_NO_MEMORY;
565 cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
566 if (!cqp->scratch_array) {
567 kfree(cqp->cqp_requests);
568 return I40IW_ERR_NO_MEMORY;
570 dev->cqp = &cqp->sc_cqp;
572 memset(&cqp_init_info, 0, sizeof(cqp_init_info));
573 status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
574 (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
575 I40IW_CQP_ALIGNMENT);
578 status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
579 I40IW_HOST_CTX_ALIGNMENT_MASK);
582 dev->cqp->host_ctx_pa = mem.pa;
583 dev->cqp->host_ctx = mem.va;
584 /* populate the cqp init info */
585 cqp_init_info.dev = dev;
586 cqp_init_info.sq_size = sqsize;
587 cqp_init_info.sq = cqp->sq.va;
588 cqp_init_info.sq_pa = cqp->sq.pa;
589 cqp_init_info.host_ctx_pa = mem.pa;
590 cqp_init_info.host_ctx = mem.va;
591 cqp_init_info.hmc_profile = iwdev->resource_profile;
592 cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
593 cqp_init_info.scratch_array = cqp->scratch_array;
594 status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
596 i40iw_pr_err("cqp init status %d\n", status);
599 status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
601 i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
602 status, maj_err, min_err);
605 spin_lock_init(&cqp->req_lock);
606 INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
607 INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
608 /* init the waitq of the cqp_requests and add them to the list */
609 for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
610 init_waitqueue_head(&cqp->cqp_requests[i].waitq);
611 list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
615 /* clean up the created resources */
616 i40iw_destroy_cqp(iwdev, false);
621 * i40iw_create_ccq - create control cq
622 * @iwdev: iwarp device
624 * Return 0, if the ccq and the resources associated with it
625 * are successfully created, otherwise return error
627 static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
629 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
630 struct i40iw_dma_mem mem;
631 enum i40iw_status_code status;
632 struct i40iw_ccq_init_info info;
633 struct i40iw_ccq *ccq = &iwdev->ccq;
635 memset(&info, 0, sizeof(info));
636 dev->ccq = &ccq->sc_cq;
639 ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
640 ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
641 status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
642 ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
645 status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
646 I40IW_SHADOWAREA_MASK);
649 ccq->sc_cq.back_cq = (void *)ccq;
650 /* populate the ccq init info */
651 info.cq_base = ccq->mem_cq.va;
652 info.cq_pa = ccq->mem_cq.pa;
653 info.num_elem = IW_CCQ_SIZE;
654 info.shadow_area = mem.va;
655 info.shadow_area_pa = mem.pa;
656 info.ceqe_mask = false;
657 info.ceq_id_valid = true;
658 info.shadow_read_threshold = 16;
659 status = dev->ccq_ops->ccq_init(dev->ccq, &info);
661 status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
664 i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
669 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
670 * @iwdev: iwarp device
671 * @msix_vec: interrupt vector information
672 * @iwceq: ceq associated with the vector
673 * @ceq_id: the id number of the iwceq
675 * Allocate interrupt resources and enable irq handling
676 * Return 0 if successful, otherwise return error
678 static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
679 struct i40iw_ceq *iwceq,
681 struct i40iw_msix_vector *msix_vec)
683 enum i40iw_status_code status;
686 if (iwdev->msix_shared && !ceq_id) {
687 tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
688 status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
690 tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
691 status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
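	/* hint each CEQ interrupt to its assigned CPU so completion
	 * processing is spread across the online CPUs
	 */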
694 cpumask_clear(&mask);
695 cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
696 irq_set_affinity_hint(msix_vec->irq, &mask);
699 i40iw_pr_err("ceq irq config fail\n");
700 return I40IW_ERR_CONFIG;
702 msix_vec->ceq_id = ceq_id;
708 * i40iw_create_ceq - create completion event queue
709 * @iwdev: iwarp device
710 * @iwceq: pointer to the ceq resources to be created
711 * @ceq_id: the id number of the iwceq
713 * Return 0, if the ceq and the resources associated with it
714 * are successfully created, otherwise return error
716 static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
717 struct i40iw_ceq *iwceq,
720 enum i40iw_status_code status;
721 struct i40iw_ceq_init_info info;
722 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
725 memset(&info, 0, sizeof(info));
726 info.ceq_id = ceq_id;
727 iwceq->iwdev = iwdev;
728 iwceq->mem.size = sizeof(struct i40iw_ceqe) *
729 iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
730 status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
731 I40IW_CEQ_ALIGNMENT);
734 info.ceq_id = ceq_id;
735 info.ceqe_base = iwceq->mem.va;
736 info.ceqe_pa = iwceq->mem.pa;
738 info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
739 iwceq->sc_ceq.ceq_id = ceq_id;
741 scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
742 status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
744 status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
748 i40iw_free_dma_mem(dev->hw, &iwceq->mem);
752 void i40iw_request_reset(struct i40iw_device *iwdev)
754 struct i40e_info *ldev = iwdev->ldev;
756 ldev->ops->request_reset(ldev, iwdev->client, 1);
760 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
761 * @iwdev: iwarp device
762 * @ldev: i40e lan device
764 * Allocate a list for all device completion event queues
765 * Create the ceq's and configure their msix interrupt vectors
766 * Return 0, if at least one ceq is successfully set up, otherwise return error
768 static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
769 struct i40e_info *ldev)
773 struct i40iw_ceq *iwceq;
774 struct i40iw_msix_vector *msix_vec;
775 enum i40iw_status_code status = 0;
778 if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
779 status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
784 status = I40IW_ERR_BAD_PTR;
788 num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
789 iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
790 if (!iwdev->ceqlist) {
791 status = I40IW_ERR_NO_MEMORY;
794 i = (iwdev->msix_shared) ? 0 : 1;
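	/* iw_msixtbl entry 0 always belongs to the AEQ; when that vector is
	 * shared it also drives CEQ 0, so the loop below starts the CEQ
	 * vectors at entry 0 instead of entry 1
	 */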
795 for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
796 iwceq = &iwdev->ceqlist[ceq_id];
797 status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
799 i40iw_pr_err("create ceq status = %d\n", status);
803 msix_vec = &iwdev->iw_msixtbl[i];
804 iwceq->irq = msix_vec->irq;
805 iwceq->msix_idx = msix_vec->idx;
806 status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
808 i40iw_destroy_ceq(iwdev, iwceq);
811 i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
817 if (!iwdev->ceqs_count) {
818 kfree(iwdev->ceqlist);
819 iwdev->ceqlist = NULL;
828 * i40iw_configure_aeq_vector - set up the msix vector for aeq
829 * @iwdev: iwarp device
831 * Allocate interrupt resources and enable irq handling
832 * Return 0 if successful, otherwise return error
834 static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
836 struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
839 if (!iwdev->msix_shared) {
840 tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
841 ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
844 i40iw_pr_err("aeq irq config fail\n");
845 return I40IW_ERR_CONFIG;
852 * i40iw_create_aeq - create async event queue
853 * @iwdev: iwarp device
855 * Return 0, if the aeq and the resources associated with it
856 * are successfully created, otherwise return error
858 static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
860 enum i40iw_status_code status;
861 struct i40iw_aeq_init_info info;
862 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
863 struct i40iw_aeq *aeq = &iwdev->aeq;
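	/* size the AEQ for up to two asynchronous events per QP plus one per CQ */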
867 aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
868 iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
869 memset(&info, 0, sizeof(info));
870 aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
871 status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
872 I40IW_AEQ_ALIGNMENT);
876 info.aeqe_base = aeq->mem.va;
877 info.aeq_elem_pa = aeq->mem.pa;
878 info.elem_cnt = aeq_size;
880 status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
883 status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
885 status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
888 i40iw_free_dma_mem(dev->hw, &aeq->mem);
893 * i40iw_setup_aeq - set up the device aeq
894 * @iwdev: iwarp device
896 * Create the aeq and configure its msix interrupt vector
897 * Return 0 if successful, otherwise return error
899 static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
901 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
902 enum i40iw_status_code status;
904 status = i40iw_create_aeq(iwdev);
908 status = i40iw_configure_aeq_vector(iwdev);
910 i40iw_destroy_aeq(iwdev);
914 if (!iwdev->msix_shared)
915 i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
920 * i40iw_initialize_ilq - create iwarp local queue for cm
921 * @iwdev: iwarp device
923 * Return 0 if successful, otherwise return error
925 static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
927 struct i40iw_puda_rsrc_info info;
928 enum i40iw_status_code status;
930 memset(&info, 0, sizeof(info));
931 info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
938 info.buf_size = 1024;
939 info.tx_buf_cnt = 16384;
940 info.receive = i40iw_receive_ilq;
941 info.xmit_complete = i40iw_free_sqbuf;
942 status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
944 i40iw_pr_err("ilq create fail\n");
949 * i40iw_initialize_ieq - create iwarp exception queue
950 * @iwdev: iwarp device
952 * Return 0 if successful, otherwise return error
954 static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
956 struct i40iw_puda_rsrc_info info;
957 enum i40iw_status_code status;
959 memset(&info, 0, sizeof(info));
960 info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
962 info.qp_id = iwdev->sc_dev.exception_lan_queue;
967 info.buf_size = 2048;
968 info.tx_buf_cnt = 16384;
969 status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
971 i40iw_pr_err("ieq create fail\n");
976 * i40iw_hmc_setup - create hmc objects for the device
977 * @iwdev: iwarp device
979 * Set up the device private memory space for the number and size of
980 * the hmc objects and create the objects
981 * Return 0 if successful, otherwise return error
983 static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
985 enum i40iw_status_code status;
987 iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
988 status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
991 status = i40iw_create_hmc_objs(iwdev, true);
994 iwdev->init_state = HMC_OBJS_CREATED;
1000 * i40iw_del_init_mem - deallocate memory resources
1001 * @iwdev: iwarp device
1003 static void i40iw_del_init_mem(struct i40iw_device *iwdev)
1005 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1007 i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
1008 kfree(dev->hmc_info->sd_table.sd_entry);
1009 dev->hmc_info->sd_table.sd_entry = NULL;
1010 kfree(iwdev->mem_resources);
1011 iwdev->mem_resources = NULL;
1012 kfree(iwdev->ceqlist);
1013 iwdev->ceqlist = NULL;
1014 kfree(iwdev->iw_msixtbl);
1015 iwdev->iw_msixtbl = NULL;
1016 kfree(iwdev->hmc_info_mem);
1017 iwdev->hmc_info_mem = NULL;
1021 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
1022 * @iwdev: iwarp device
1023 * @idx: the index of the mac ip address to delete
1025 static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
1027 struct i40iw_cqp *iwcqp = &iwdev->cqp;
1028 struct i40iw_cqp_request *cqp_request;
1029 struct cqp_commands_info *cqp_info;
1030 enum i40iw_status_code status = 0;
1032 cqp_request = i40iw_get_cqp_request(iwcqp, true);
1034 i40iw_pr_err("cqp_request memory failed\n");
1037 cqp_info = &cqp_request->info;
1038 cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
1039 cqp_info->post_sq = 1;
1040 cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
1041 cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1042 cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
1043 cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
1044 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1046 i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
1050 * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
1051 * @iwdev: iwarp device
1052 * @mac_addr: pointer to mac address
1053 * @idx: the index of the mac ip address to add
1055 static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
1059 struct i40iw_local_mac_ipaddr_entry_info *info;
1060 struct i40iw_cqp *iwcqp = &iwdev->cqp;
1061 struct i40iw_cqp_request *cqp_request;
1062 struct cqp_commands_info *cqp_info;
1063 enum i40iw_status_code status = 0;
1065 cqp_request = i40iw_get_cqp_request(iwcqp, true);
1067 i40iw_pr_err("cqp_request memory failed\n");
1068 return I40IW_ERR_NO_MEMORY;
1071 cqp_info = &cqp_request->info;
1073 cqp_info->post_sq = 1;
1074 info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
1075 ether_addr_copy(info->mac_addr, mac_addr);
1076 info->entry_idx = idx;
1077 cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1078 cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
1079 cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
1080 cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1081 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1083 i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
1088 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
1089 * @iwdev: iwarp device
1090 * @mac_ip_tbl_idx: the index of the new mac ip address
1092 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
1093 * to hold the index of the newly created mac ip address
1094 * Return 0 if successful, otherwise return error
1096 static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
1097 u16 *mac_ip_tbl_idx)
1099 struct i40iw_cqp *iwcqp = &iwdev->cqp;
1100 struct i40iw_cqp_request *cqp_request;
1101 struct cqp_commands_info *cqp_info;
1102 enum i40iw_status_code status = 0;
1104 cqp_request = i40iw_get_cqp_request(iwcqp, true);
1106 i40iw_pr_err("cqp_request memory failed\n");
1107 return I40IW_ERR_NO_MEMORY;
1110 /* increment refcount, because we need the cqp request ret value */
1111 atomic_inc(&cqp_request->refcount);
1113 cqp_info = &cqp_request->info;
1114 cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
1115 cqp_info->post_sq = 1;
1116 cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
1117 cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1118 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1120 *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
1122 i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
1123 /* decrement refcount and free the cqp request, if no longer used */
1124 i40iw_put_cqp_request(iwcqp, cqp_request);
1129 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
1130 * @iwdev: iwarp device
1131 * @macaddr: pointer to mac address
1133 * Allocate a mac ip address entry and add it to the hw table
1134 * Return 0 if successful, otherwise return error
1136 static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
1139 enum i40iw_status_code status;
1141 status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
1143 status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
1144 (u8)iwdev->mac_ip_table_idx);
1146 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1152 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
1153 * @iwdev: iwarp device
1155 static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
1157 struct net_device *ip_dev;
1158 struct inet6_dev *idev;
1159 struct inet6_ifaddr *ifp, *tmp;
1160 u32 local_ipaddr6[4];
1163 for_each_netdev_rcu(&init_net, ip_dev) {
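		/* walk the iwarp netdev itself and any VLAN device stacked
		 * on it, skipping interfaces that are not up
		 */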
1164 if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
1165 (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
1166 (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
1167 idev = __in6_dev_get(ip_dev);
1169 i40iw_pr_err("ipv6 inet device not found\n");
1172 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
1173 i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
1174 rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
1175 i40iw_copy_ip_ntohl(local_ipaddr6,
1176 ifp->addr.in6_u.u6_addr32);
1177 i40iw_manage_arp_cache(iwdev,
1189 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
1190 * @iwdev: iwarp device
1192 static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
1194 struct net_device *dev;
1195 struct in_device *idev;
1196 bool got_lock = true;
if (!rtnl_trylock())
	got_lock = false;
1202 for_each_netdev(&init_net, dev) {
1203 if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
1204 (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
1205 (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
1206 idev = in_dev_get(dev);
1208 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1209 "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
1210 rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
1212 ip_addr = ntohl(ifa->ifa_address);
1213 i40iw_manage_arp_cache(iwdev,
1228 * i40iw_add_mac_ip - add mac and ip addresses
1229 * @iwdev: iwarp device
1231 * Create and add a mac ip address entry to the hw table and
1232 * ipv4/ipv6 addresses to the arp cache
1233 * Return 0 if successful, otherwise return error
1235 static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
1237 struct net_device *netdev = iwdev->netdev;
1238 enum i40iw_status_code status;
1240 status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
1243 i40iw_add_ipv4_addr(iwdev);
1244 i40iw_add_ipv6_addr(iwdev);
1249 * i40iw_wait_pe_ready - Check if firmware is ready
1250 * @hw: provides access to registers
1252 static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
1261 statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
1262 i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
1263 statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
1264 i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
1265 statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
1266 i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
1267 __LINE__, statuscpu1);
1268 statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
1269 i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
1270 __LINE__, statuscpu2);
1271 if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
1272 break; /* SUCCESS */
1275 } while (retrycount < 14);
1276 i40iw_wr32(hw, 0xb4040, 0x4C104C5);
1280 * i40iw_initialize_dev - initialize device
1281 * @iwdev: iwarp device
1282 * @ldev: lan device information
1284 * Allocate memory for the hmc objects and initialize iwdev
1285 * Return 0 if successful, otherwise clean up the resources
1288 static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
1289 struct i40e_info *ldev)
1291 enum i40iw_status_code status;
1292 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1293 struct i40iw_device_init_info info;
1294 struct i40iw_vsi_init_info vsi_info;
1295 struct i40iw_dma_mem mem;
1296 struct i40iw_l2params l2params;
1298 struct i40iw_vsi_stats_info stats_info;
1299 u16 last_qset = I40IW_NO_QSET;
1303 memset(&l2params, 0, sizeof(l2params));
1304 memset(&info, 0, sizeof(info));
1305 size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
1306 (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
1307 iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1308 if (!iwdev->hmc_info_mem)
1309 return I40IW_ERR_NO_MEMORY;
1311 iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
1312 dev->hmc_info = &iwdev->hw.hmc;
1313 dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
1314 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
1315 I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
1318 info.fpm_query_buf_pa = mem.pa;
1319 info.fpm_query_buf = mem.va;
1320 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
1321 I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
1324 info.fpm_commit_buf_pa = mem.pa;
1325 info.fpm_commit_buf = mem.va;
1326 info.hmc_fn_id = ldev->fid;
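	/* ldev->ftype is zero for the PF and nonzero for a VF */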
1327 info.is_pf = (ldev->ftype) ? false : true;
1328 info.bar0 = ldev->hw_addr;
1329 info.hw = &iwdev->hw;
1330 info.debug_mask = debug;
l2params.mss =
	(ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
1333 for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
1334 qset = ldev->params.qos.prio_qos[i].qs_handle;
1335 l2params.qs_handle_list[i] = qset;
if (last_qset == I40IW_NO_QSET)
	last_qset = qset;
else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
	iwdev->dcb = true;
1341 i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
1342 info.exception_lan_queue = 1;
1343 info.vchnl_send = i40iw_virtchnl_send;
1344 status = i40iw_device_init(&iwdev->sc_dev, &info);
1348 memset(&vsi_info, 0, sizeof(vsi_info));
1349 vsi_info.dev = &iwdev->sc_dev;
1350 vsi_info.back_vsi = (void *)iwdev;
1351 vsi_info.params = &l2params;
1352 i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
1355 memset(&stats_info, 0, sizeof(stats_info));
1356 stats_info.fcn_id = ldev->fid;
1357 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1358 if (!stats_info.pestat) {
1359 status = I40IW_ERR_NO_MEMORY;
1362 stats_info.stats_initialize = true;
1363 if (stats_info.pestat)
1364 i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
1368 kfree(iwdev->hmc_info_mem);
1369 iwdev->hmc_info_mem = NULL;
1374 * i40iw_register_notifiers - register tcp ip notifiers
1376 static void i40iw_register_notifiers(void)
1378 if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
1379 register_inetaddr_notifier(&i40iw_inetaddr_notifier);
1380 register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1381 register_netevent_notifier(&i40iw_net_notifier);
1386 * i40iw_save_msix_info - copy msix vector information to iwarp device
1387 * @iwdev: iwarp device
1388 * @ldev: lan device information
1390 * Allocate iwdev msix table and copy the ldev msix info to the table
1391 * Return 0 if successful, otherwise return error
1393 static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
1394 struct i40e_info *ldev)
1396 struct i40e_qvlist_info *iw_qvlist;
1397 struct i40e_qv_info *iw_qvinfo;
1402 iwdev->msix_count = ldev->msix_count;
1404 size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
1405 size += sizeof(struct i40e_qvlist_info);
1406 size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
1407 iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
1409 if (!iwdev->iw_msixtbl)
1410 return I40IW_ERR_NO_MEMORY;
1411 iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
1412 iw_qvlist = iwdev->iw_qvlist;
1413 iw_qvinfo = iw_qvlist->qv_info;
1414 iw_qvlist->num_vectors = iwdev->msix_count;
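	/* when there are no more MSI-X vectors than online CPUs, share
	 * vector 0 between the AEQ and CEQ 0 rather than dedicating it
	 * to the AEQ
	 */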
1415 if (iwdev->msix_count <= num_online_cpus())
1416 iwdev->msix_shared = true;
1417 for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
1418 iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
1419 iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
1420 iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
1422 iw_qvinfo->aeq_idx = 0;
1423 if (iwdev->msix_shared)
1424 iw_qvinfo->ceq_idx = ceq_idx++;
1426 iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
1428 iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
1429 iw_qvinfo->ceq_idx = ceq_idx++;
1431 iw_qvinfo->itr_idx = 3;
1432 iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
1438 * i40iw_deinit_device - clean up the device resources
1439 * @iwdev: iwarp device
1441 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
1442 * destroy the device queues and free the pble and the hmc objects
1444 static void i40iw_deinit_device(struct i40iw_device *iwdev)
1446 struct i40e_info *ldev = iwdev->ldev;
1448 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1450 i40iw_pr_info("state = %d\n", iwdev->init_state);
1451 if (iwdev->param_wq)
1452 destroy_workqueue(iwdev->param_wq);
1454 switch (iwdev->init_state) {
1455 case RDMA_DEV_REGISTERED:
1456 iwdev->iw_status = 0;
1457 i40iw_port_ibevent(iwdev);
1458 i40iw_destroy_rdma_device(iwdev->iwibdev);
1460 case IP_ADDR_REGISTERED:
1462 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1465 if (!atomic_dec_return(&i40iw_notifiers_registered)) {
1466 unregister_netevent_notifier(&i40iw_net_notifier);
1467 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1468 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1471 case PBLE_CHUNK_MEM:
1472 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
1475 i40iw_dele_ceqs(iwdev);
1478 i40iw_destroy_aeq(iwdev);
1481 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
1484 i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
1487 i40iw_destroy_ccq(iwdev);
1489 case HMC_OBJS_CREATED:
1490 i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
1493 i40iw_destroy_cqp(iwdev, true);
1496 i40iw_cleanup_cm_core(&iwdev->cm_core);
1497 if (iwdev->vsi.pestat) {
1498 i40iw_vsi_stats_free(&iwdev->vsi);
1499 kfree(iwdev->vsi.pestat);
1501 i40iw_del_init_mem(iwdev);
1506 i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
1510 i40iw_del_handler(i40iw_find_i40e_handler(ldev));
1515 * i40iw_setup_init_state - set up the initial device struct
1516 * @hdl: handler for iwarp device - one per instance
1517 * @ldev: lan device information
1518 * @client: iwarp client information, provided during registration
1520 * Initialize the iwarp device and its hdl information
1521 * using the ldev and client information
1522 * Return 0 if successful, otherwise return error
1524 static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
1525 struct i40e_info *ldev,
1526 struct i40e_client *client)
1528 struct i40iw_device *iwdev = &hdl->device;
1529 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1530 enum i40iw_status_code status;
1532 memcpy(&hdl->ldev, ldev, sizeof(*ldev));
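	/* the weighted-VF profile (1) appears to be folded into even
	 * distribution (2) here
	 */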
1533 if (resource_profile == 1)
1534 resource_profile = 2;
1536 iwdev->mpa_version = mpa_version;
1537 iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
1538 (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
1539 I40IW_HMC_PROFILE_DEFAULT;
1540 iwdev->max_rdma_vfs =
1541 (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
1542 iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
1543 iwdev->netdev = ldev->netdev;
1544 hdl->client = client;
1546 iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
1548 iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;
1550 status = i40iw_save_msix_info(iwdev, ldev);
1553 iwdev->hw.dev_context = (void *)ldev->pcidev;
1554 iwdev->hw.hw_addr = ldev->hw_addr;
1555 status = i40iw_allocate_dma_mem(&iwdev->hw,
1556 &iwdev->obj_mem, 8192, 4096);
1559 iwdev->obj_next = iwdev->obj_mem;
1560 iwdev->push_mode = push_mode;
1562 init_waitqueue_head(&iwdev->vchnl_waitq);
1563 init_waitqueue_head(&dev->vf_reqs);
1564 init_waitqueue_head(&iwdev->close_wq);
1566 status = i40iw_initialize_dev(iwdev, ldev);
1569 kfree(iwdev->iw_msixtbl);
1570 i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
1571 iwdev->iw_msixtbl = NULL;
1577 * i40iw_get_used_rsrc - determine resources used internally
1578 * @iwdev: iwarp device
1580 * Called after internal allocations
1582 static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
1584 iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
1585 iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
1586 iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
1587 iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
1591 * i40iw_open - client interface operation open for iwarp/uda device
1592 * @ldev: lan device information
1593 * @client: iwarp client information, provided during registration
1595 * Called by the lan driver during the processing of client register
1596 * Create device resources, set up queues, pble and hmc objects and
1597 * register the device with the ib verbs interface
1598 * Return 0 if successful, otherwise return error
1600 static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
1602 struct i40iw_device *iwdev;
1603 struct i40iw_sc_dev *dev;
1604 enum i40iw_status_code status;
1605 struct i40iw_handler *hdl;
1607 hdl = i40iw_find_netdev(ldev->netdev);
1611 hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
1614 iwdev = &hdl->device;
1616 dev = &iwdev->sc_dev;
1617 i40iw_setup_cm_core(iwdev);
1619 dev->back_dev = (void *)iwdev;
1620 iwdev->ldev = &hdl->ldev;
1621 iwdev->client = client;
1622 mutex_init(&iwdev->pbl_mutex);
1623 i40iw_add_handler(hdl);
1626 status = i40iw_setup_init_state(hdl, ldev, client);
1629 iwdev->init_state = INITIAL_STATE;
1631 i40iw_wait_pe_ready(dev->hw);
1632 status = i40iw_create_cqp(iwdev);
1635 iwdev->init_state = CQP_CREATED;
1636 status = i40iw_hmc_setup(iwdev);
1639 status = i40iw_create_ccq(iwdev);
1642 iwdev->init_state = CCQ_CREATED;
1643 status = i40iw_initialize_ilq(iwdev);
1646 iwdev->init_state = ILQ_CREATED;
1647 status = i40iw_initialize_ieq(iwdev);
1650 iwdev->init_state = IEQ_CREATED;
1651 status = i40iw_setup_aeq(iwdev);
1654 iwdev->init_state = AEQ_CREATED;
1655 status = i40iw_setup_ceqs(iwdev, ldev);
1658 iwdev->init_state = CEQ_CREATED;
1659 status = i40iw_initialize_hw_resources(iwdev);
1662 i40iw_get_used_rsrc(iwdev);
1663 dev->ccq_ops->ccq_arm(dev->ccq);
1664 status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
1667 iwdev->init_state = PBLE_CHUNK_MEM;
1668 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
1669 i40iw_register_notifiers();
1670 iwdev->init_state = INET_NOTIFIER;
1671 status = i40iw_add_mac_ip(iwdev);
1674 iwdev->init_state = IP_ADDR_REGISTERED;
1675 if (i40iw_register_rdma_device(iwdev)) {
1676 i40iw_pr_err("register rdma device fail\n");
1680 iwdev->init_state = RDMA_DEV_REGISTERED;
1681 iwdev->iw_status = 1;
1682 i40iw_port_ibevent(iwdev);
1683 iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
if (!iwdev->param_wq)
1686 i40iw_pr_info("i40iw_open completed\n");
1690 i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
1691 i40iw_deinit_device(iwdev);
1696 * i40iw_l2params_worker - worker for l2 params change
1697 * @work: work pointer for l2 params
1699 static void i40iw_l2params_worker(struct work_struct *work)
1701 struct l2params_work *dwork =
1702 container_of(work, struct l2params_work, work);
1703 struct i40iw_device *iwdev = dwork->iwdev;
1705 i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
1706 atomic_dec(&iwdev->params_busy);
1711 * i40iw_l2param_change - handle qs handles for qos and mss change
1712 * @ldev: lan device information
 * @client: client for parameter change
1714 * @params: new parameters from L2
1716 static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
1717 struct i40e_params *params)
1719 struct i40iw_handler *hdl;
1720 struct i40iw_l2params *l2params;
1721 struct l2params_work *work;
1722 struct i40iw_device *iwdev;
1725 hdl = i40iw_find_i40e_handler(ldev);
1729 iwdev = &hdl->device;
1731 if (atomic_read(&iwdev->params_busy))
1735 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1739 atomic_inc(&iwdev->params_busy);
1741 work->iwdev = iwdev;
1742 l2params = &work->l2params;
1743 for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
1744 l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
1746 l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
1748 INIT_WORK(&work->work, i40iw_l2params_worker);
1749 queue_work(iwdev->param_wq, &work->work);
1753 * i40iw_close - client interface operation close for iwarp/uda device
1754 * @ldev: lan device information
1755 * @client: client to close
1757 * Called by the lan driver during the processing of client unregister
1758 * Destroy and clean up the driver resources
1760 static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
1762 struct i40iw_device *iwdev;
1763 struct i40iw_handler *hdl;
1765 hdl = i40iw_find_i40e_handler(ldev);
1769 iwdev = &hdl->device;
1770 iwdev->closing = true;
1773 iwdev->reset = true;
1775 i40iw_cm_disconnect_all(iwdev);
1776 destroy_workqueue(iwdev->virtchnl_wq);
1777 i40iw_deinit_device(iwdev);
1781 * i40iw_vf_reset - process VF reset
1782 * @ldev: lan device information
1783 * @client: client interface instance
1784 * @vf_id: virtual function id
1786 * Called when a VF is reset by the PF
1787 * Destroy and clean up the VF resources
1789 static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
1791 struct i40iw_handler *hdl;
1792 struct i40iw_sc_dev *dev;
1793 struct i40iw_hmc_fcn_info hmc_fcn_info;
1794 struct i40iw_virt_mem vf_dev_mem;
1795 struct i40iw_vfdev *tmp_vfdev;
1797 unsigned long flags;
1798 struct i40iw_device *iwdev;
1800 hdl = i40iw_find_i40e_handler(ldev);
1804 dev = &hdl->device.sc_dev;
1805 iwdev = (struct i40iw_device *)dev->back_dev;
1807 for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
1808 if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
1810 /* free all resources allocated on behalf of vf */
1811 tmp_vfdev = dev->vf_dev[i];
1812 spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
1813 dev->vf_dev[i] = NULL;
1814 spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
1815 i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
1816 /* remove vf hmc function */
1817 memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
1818 hmc_fcn_info.vf_id = vf_id;
1819 hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
1820 hmc_fcn_info.free_fcn = true;
1821 i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
1823 vf_dev_mem.va = tmp_vfdev;
1824 vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
1825 sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
1826 i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
1832 * i40iw_vf_enable - enable a number of VFs
1833 * @ldev: lan device information
1834 * @client: client interface instance
1835 * @num_vfs: number of VFs for the PF
1837 * Called when the number of VFs changes
1839 static void i40iw_vf_enable(struct i40e_info *ldev,
1840 struct i40e_client *client,
1843 struct i40iw_handler *hdl;
1845 hdl = i40iw_find_i40e_handler(ldev);
1849 if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
1850 hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
1852 hdl->device.max_enabled_vfs = num_vfs;
1856 * i40iw_vf_capable - check if VF capable
1857 * @ldev: lan device information
1858 * @client: client interface instance
1859 * @vf_id: virtual function id
1861 * Return 1 if a VF slot is available or if VF is already RDMA enabled
1862 * Return 0 otherwise
1864 static int i40iw_vf_capable(struct i40e_info *ldev,
1865 struct i40e_client *client,
1868 struct i40iw_handler *hdl;
1869 struct i40iw_sc_dev *dev;
1872 hdl = i40iw_find_i40e_handler(ldev);
1876 dev = &hdl->device.sc_dev;
1878 for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
1879 if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
1887 * i40iw_virtchnl_receive - receive a message through the virtual channel
1888 * @ldev: lan device information
1889 * @client: client interface instance
1890 * @vf_id: virtual function id associated with the message
1891 * @msg: message buffer pointer
1892 * @len: length of the message
1894 * Invoke virtual channel receive operation for the given msg
1895 * Return 0 if successful, otherwise return error
1897 static int i40iw_virtchnl_receive(struct i40e_info *ldev,
1898 struct i40e_client *client,
1903 struct i40iw_handler *hdl;
1904 struct i40iw_sc_dev *dev;
1905 struct i40iw_device *iwdev;
1906 int ret_code = I40IW_NOT_SUPPORTED;
1909 return I40IW_ERR_PARAM;
1911 hdl = i40iw_find_i40e_handler(ldev);
1913 return I40IW_ERR_PARAM;
1915 dev = &hdl->device.sc_dev;
1916 iwdev = dev->back_dev;
1918 if (dev->vchnl_if.vchnl_recv) {
1919 ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
1921 atomic_dec(&iwdev->vchnl_msgs);
1922 wake_up(&iwdev->vchnl_waitq);
1929 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 * Wait until the virtual channel is clear
 * before sending the next message
1934 * Returns false if error
1935 * Returns true if clear to send
1937 bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
1939 struct i40iw_device *iwdev;
1940 wait_queue_entry_t wait;
1942 iwdev = dev->back_dev;
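	/* fast path: nobody is waiting on the virtual channel and no
	 * virtchnl message is outstanding
	 */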
1944 if (!wq_has_sleeper(&dev->vf_reqs) &&
1945 (atomic_read(&iwdev->vchnl_msgs) == 0))
1946 return true; /* virtual channel is clear */
1949 add_wait_queue_exclusive(&dev->vf_reqs, &wait);
1951 if (!wait_event_timeout(dev->vf_reqs,
1952 (atomic_read(&iwdev->vchnl_msgs) == 0),
1953 I40IW_VCHNL_EVENT_TIMEOUT))
1954 dev->vchnl_up = false;
1956 remove_wait_queue(&dev->vf_reqs, &wait);
1958 return dev->vchnl_up;
1962 * i40iw_virtchnl_send - send a message through the virtual channel
1963 * @dev: iwarp device
1964 * @vf_id: virtual function id associated with the message
1965 * @msg: virtual channel message buffer pointer
1966 * @len: length of the message
1968 * Invoke virtual channel send operation for the given msg
1969 * Return 0 if successful, otherwise return error
1971 static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
1976 struct i40iw_device *iwdev;
1977 struct i40e_info *ldev;
1979 if (!dev || !dev->back_dev)
1980 return I40IW_ERR_BAD_PTR;
1982 iwdev = dev->back_dev;
1985 if (ldev && ldev->ops && ldev->ops->virtchnl_send)
1986 return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
1987 return I40IW_ERR_BAD_PTR;
1990 /* client interface functions */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
1993 .close = i40iw_close,
1994 .l2_param_change = i40iw_l2param_change,
1995 .virtchnl_receive = i40iw_virtchnl_receive,
1996 .vf_reset = i40iw_vf_reset,
1997 .vf_enable = i40iw_vf_enable,
1998 .vf_capable = i40iw_vf_capable
2002 * i40iw_init_module - driver initialization function
2004 * First function to call when the driver is loaded
2005 * Register the driver as i40e client and port mapper client
2007 static int __init i40iw_init_module(void)
2011 memset(&i40iw_client, 0, sizeof(i40iw_client));
2012 i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
2013 i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
2014 i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
2015 i40iw_client.ops = &i40e_ops;
2016 memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
2017 i40iw_client.type = I40E_CLIENT_IWARP;
2018 spin_lock_init(&i40iw_handler_lock);
2019 ret = i40e_register_client(&i40iw_client);
2024 * i40iw_exit_module - driver exit clean up function
2026 * The function is called just before the driver is unloaded
2027 * Unregister the driver as i40e client and port mapper client
2029 static void __exit i40iw_exit_module(void)
2031 i40e_unregister_client(&i40iw_client);
2034 module_init(i40iw_init_module);
2035 module_exit(i40iw_exit_module);