/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
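
/*
 * Every tracked object embeds struct res_common and follows the same life
 * cycle.  The *_BUSY value of each enum aliases RES_ANY_BUSY (1), so
 * get_res()/put_res() can test the busy state generically, while the
 * remaining states are type specific; for a QP, for example:
 *
 *	RES_QP_RESERVED -> RES_QP_MAPPED -> RES_QP_HW
 *
 * Transitions are started by the *_res_start_move_to() helpers below,
 * which park the entry in its BUSY state, and are then committed by
 * res_end_move() or rolled back by res_abort_move().
 */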
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
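
/*
 * One rb-tree is kept per resource type, keyed by res_id.  An illustrative
 * usage sketch (hypothetical qpn; in this file the trees are only touched
 * under mlx4_tlock):
 *
 *	struct res_common *r = alloc_tr(qpn, RES_QP, slave, 0);
 *	err = res_tracker_insert(&tracker->res_tree[RES_QP], r);
 *	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
 *
 * res_tracker_insert() returns -EEXIST when an entry with the same res_id
 * is already present, so a given id can have only one owner at a time.
 */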
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
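
/*
 * Worked example of the grant arithmetic above (numbers illustrative only):
 * quota[slave] = 60, guaranteed = 10, allocated = 8, free = 100,
 * reserved = 40, count = 5.  allocated + count = 13 <= 60, so the quota
 * check passes; 13 > guaranteed, so from_free = 5 - (10 - 8) = 3, and the
 * request is granted because free - from_free = 97 > 40 = reserved.
 * The "reserved" pool is what keeps other functions' guaranteed shares
 * from being consumed by a bursting slave.
 */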
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
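
/*
 * Example (illustrative): with num_vfs = 3 and num_instances = 8000,
 * each function gets guaranteed = 8000 / (2 * 4) = 1000 and
 * quota = 4000 + 1000 = 5000.  Half of the pool is handed out as equal
 * guaranteed shares; any single function may burst up to its quota from
 * the free area when others are idle.
 */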
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_VLAN_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < MLX4_MAX_PORTS; j++)
					res_alloc->res_port_rsvd[j] +=
						res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
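
/*
 * Note on the allocated[] layout set up above: global resources keep one
 * counter per function (index = slave), while the per-port resources
 * (RES_MAC, RES_VLAN) keep MLX4_MAX_PORTS counters per function, flattened
 * as index = (port - 1) * (dev->num_vfs + 1) + slave; this is the same
 * indexing used by mlx4_grant_resource()/mlx4_release_resource().
 */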
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}
}
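
/*
 * update_gid() pins a slave's address-path GID index: UD QPs get the
 * paravirtualized encoding 0x80 | slave, while RC/UC paths are forced to
 * slave & 0x7F, so a guest can never address another function's GID entry
 * regardless of what it wrote into the mailbox.
 */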
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type ||
		    (MLX4_QP_ST_UD == qp_type &&
		     !mlx4_is_qp_reserved(dev, qpn)))
			return -EINVAL;

		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
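
/*
 * An MPT "key" as seen by a slave carries extra bits in its high part;
 * masking with num_mpts - 1 (num_mpts is a power of two) recovers the
 * table index used as the tracker id.  For example (illustrative), with
 * num_mpts = 0x10000, key 0x42a013 maps to MPT index 0xa013.
 */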
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
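
/*
 * get_res()/put_res() implement a simple claim protocol: get_res() saves
 * the current state in from_state and parks the entry in RES_ANY_BUSY so
 * concurrent wrappers back off with -EBUSY; put_res() restores from_state.
 * Every successful get_res() must be paired with exactly one put_res().
 */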
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, so unwind to 0, not to base */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_qp *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_mpt *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_eq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err;
	struct res_cq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	int err = 0;
	struct res_srq *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
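
/*
 * Two distinct notions of "reserved" QPs are used below: valid_reserved()
 * accepts the special/proxy/tunnel QPs a function may legitimately map,
 * while fw_reserved() covers the firmware-owned region
 * [0, reserved_qps_cnt[MLX4_QP_REGION_FW]) whose ICM must never be
 * allocated or freed on behalf of a slave.
 */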
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			mlx4_release_resource(dev, slave, RES_MAC, 1, port);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
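
/*
 * Worked example (illustrative numbers): log_sq_size = 7, log_sq_stride = 2,
 * log_rq_size = 4, log_rq_stride = 2, page_shift = 12, page_offset = 0,
 * and no SRQ/RSS/XRC.  Then sq_size = 1 << (7 + 2 + 4) = 8192 bytes,
 * rq_size = 1 << (4 + 2 + 4) = 1024 bytes, total_mem = 9216, and
 * total_pages = roundup_pow_of_two(9216 >> 12) = 2 MTT entries.
 */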
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
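
/*
 * qp_get_srqn() deliberately returns 25 bits: the low 24 bits are the SRQ
 * number and bit 24 is the "SRQ in use" flag, which mlx4_RST2INIT_QP_wrapper
 * below splits into srqn and use_srq.
 */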
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}
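
/*
 * Example (illustrative): an EQ with log_eq_size = 10 holds 1024 entries
 * of 32 bytes each (hence the "+ 5"), i.e. 32KB; with 4KB pages
 * (page_shift = 12) that is 1 << (10 + 5 - 12) = 8 MTT entries.  The same
 * shape of computation is used for CQs just below.
 */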
static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
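
/*
 * EQ numbers are only unique per function, so the tracker key combines
 * the owner and the EQ number: res_id = (slave << 8) | eqn.  For example,
 * EQ 3 of slave 2 is tracked as id 0x203, which cannot collide with EQ 3
 * of the PF (id 0x003).  The HW2SW and GEN_EQE paths below rebuild the
 * same key.
 */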
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx  = inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_CQ, cqn);
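/*
 * Note the MTT reference pairing: SW2HW_CQ takes a reference on the CQ's
 * MTT (atomic_inc above) and HW2SW_CQ drops it (atomic_dec below), so an
 * MTT cannot be freed while a hardware-owned CQ still points at it.
 */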
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);

	res_abort_move(dev, slave, RES_CQ, cqn);
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);

	if (orig_mtt != cq->mtt) {

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
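/*
 * On a successful resize the CQ's MTT reference migrates from the old
 * MTT (orig_mtt, dropped above) to the newly supplied one. The MODIFY_CQ
 * wrapper below routes op_modifier == 0 through this resize path; other
 * modify operations go straight to the firmware.
 */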
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
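/*
 * The SRQ buffer holds 1 << log_srq_size WQEs of 16 << log_rq_stride
 * bytes each, i.e. 1 << (log_srq_size + log_rq_stride + 4) bytes in
 * total; dividing by the page size gives the MTT entry count. For
 * example, log_srq_size = 10, log_rq_stride = 2 (64-byte WQEs) and a
 * 4 KB page (page_shift = 12) yield 1 << (10 + 2 + 4 - 12) = 16 entries.
 */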
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_SRQ, srqn);
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&srq->mtt->ref_count);

	atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	res_abort_move(dev, slave, RES_SRQ, srqn);
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, srqn, RES_SRQ);
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, srqn, RES_SRQ);
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, qpn, RES_QP);
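/*
 * mlx4_GEN_QP_wrapper() is the common tail for QP state transitions: it
 * only checks that the QP is hardware-owned before forwarding the
 * command. The transition-specific wrappers below first patch the QP
 * context in the mailbox - which starts at inbox->buf + 8, past 8 bytes
 * of control data carrying the optparam mask - and then delegate here.
 */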
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;

	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	/* If there was no error, save the sched_queue value passed in by
	 * the VF - essentially the QoS value it provided. This will be
	 * useful if we ever allow dynamic changes from VST back to VGT.
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3 = orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx = orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index = orig_vlan_index;
		qp->feup = orig_feup;
	}

	put_res(dev, slave, qpn, RES_QP);
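/*
 * The orig_* values captured above are the VF's own settings, saved
 * before update_vport_qp_param() may overwrite them for VST enforcement;
 * mlx4_vf_immed_vlan_work_handler() further below restores them when the
 * port is switched back to VGT.
 */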
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);

	atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);

	res_abort_move(dev, slave, RES_QP, qpn);
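/*
 * Moving a QP to RESET drops the references it took on its MTT, receive
 * and send CQs, and, when it has one, its SRQ - mirroring the references
 * taken when the QP entered hardware ownership.
 */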
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {

	memcpy(res->gid, gid, 16);

	res->reg_id = reg_id;
	list_add_tail(&res->list, &rqp->mcg_list);

	spin_unlock_irq(&rqp->mcg_spl);
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)

	*reg_id = res->reg_id;
	list_del(&res->list);

	spin_unlock_irq(&rqp->mcg_spl);
static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
						 block_loopback, prot,
						 type, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     enum mlx4_protocol prot, enum mlx4_steer_type type,
		     u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
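/*
 * In device-managed flow steering (DMFS) a rule is identified by the
 * 64-bit reg_id handed back at attach time, so detach only needs that
 * id; in B0 steering the rule is addressed by its (gid, prot, type)
 * tuple instead, which is why struct res_gid keeps both forms around.
 */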
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;

	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
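	/* The steer type travels in bit 1 of gid[7]; it is shifted down to
	 * form the enum mlx4_steer_type value (presumably distinguishing
	 * unicast from multicast steering). */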
	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);

	err = qp_attach(dev, &qp, gid, block_loopback, prot,
			type, &reg_id);

	pr_err("Failed to attach rule to qp 0x%x\n", qpn);

	err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);

	err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);

	err = qp_detach(dev, &qp, gid, prot, type, reg_id);

	pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
	       qpn, reg_id);

	put_res(dev, slave, qpn, RES_QP);

	qp_detach(dev, &qp, gid, prot, type, reg_id);

	put_res(dev, slave, qpn, RES_QP);
/*
 * MAC validation for flow steering rules.
 * A VF may attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;

	/* make sure it isn't a multicast or broadcast MAC */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))

		pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
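/*
 * cpu_to_be64(res->mac << 16) left-aligns the 48-bit MAC inside the
 * 64-bit word, so its big-endian representation starts with the six MAC
 * bytes and the first ETH_ALEN bytes of be_mac can be compared directly
 * against the rule's dst_mac.
 */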
/*
 * If the eth header is missing, insert an eth header with a MAC address
 * that is assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;

	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;

	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;

	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for the eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);

	pr_err("Failed to add eth header to FS rule: no matching MAC found for port %d\n",
	       port);

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
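/*
 * Hardware rule segments are sized in 4-byte units, hence the >> 2 when
 * filling eth_header->size; the same convention explains why the attach
 * wrapper below grows vhcr->in_modifier by sizeof(...) >> 2 after
 * prepending an eth header.
 */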
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];

	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);

	pr_err("Steering rule with qpn 0x%x rejected\n", qpn);

	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {

	case MLX4_NET_TRANS_RULE_ID_IB:

	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {

		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;

		pr_err("Corrupted mailbox\n");

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);

	mlx4_err(dev, "Failed to add flow steering resources\n");

	mlx4_cmd(dev, vhcr->out_param, 0, 0,
		 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		 MLX4_CMD_NATIVE);

	atomic_inc(&rqp->ref_count);

	put_res(dev, slave, qpn, RES_QP);
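/*
 * Attach flow: validate (or synthesize) the L2 header, execute the
 * firmware command, then register the returned rule id with the resource
 * tracker. If tracking fails the rule is immediately detached again so
 * firmware and tracker stay consistent, and on success the target QP's
 * reference count is raised so it cannot vanish while rules still point
 * at it.
 */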
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);

	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);

	mlx4_err(dev, "Failed to remove flow steering resources\n");

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	atomic_dec(&rqp->ref_count);

	put_res(dev, slave, rrule->qpn, RES_QP);
enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
					      struct mlx4_vhcr *vhcr,
					      struct mlx4_cmd_mailbox *inbox,
					      struct mlx4_cmd_mailbox *outbox,
					      struct mlx4_cmd_info *cmd)
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {

			if (r->state == RES_ANY_BUSY) {

					 "%s id 0x%llx is busy\n",

				r->from_state = r->state;
				r->state = RES_ANY_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
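/*
 * _move_all_busy() walks every resource of the given type owned by the
 * slave and claims it by forcing its state to RES_ANY_BUSY (the previous
 * state is saved in from_state for the teardown code); resources that
 * are already busy - i.e. currently in use by a command - are skipped
 * and reported back to the caller so it can retry.
 */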
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;

	busy = _move_all_busy(dev, slave, type, 0);
	if (time_after(jiffies, begin + 5 * HZ))

	busy = _move_all_busy(dev, slave, type, 1);
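/*
 * move_all_busy() retries the quiet pass until a five-second deadline
 * (begin + 5 * HZ) expires, then runs one final pass with print set so
 * the stragglers are logged. The rem_slave_*() helpers below all follow
 * the same teardown pattern afterwards: walk the slave's list and, for
 * each resource, step its saved state downwards (HW ownership -> mapped
 * -> allocated/reserved), undoing one stage per iteration until the
 * resource is deleted from the tracker.
 */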
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}

					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;

					err = mlx4_cmd(dev, in_param,
						       MLX4_CMD_TIME_CLASS_A,

					mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
						 slave, qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);

					atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);

					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_TIME_CLASS_A,

					mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
						 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);

					atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);

					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_TIME_CLASS_A,

					mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
						 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;

					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_TIME_CLASS_A,

					mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
						 slave, mptn);
					atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:

					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];

	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));

					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {

					err = mlx4_cmd_box(dev, slave, 0,
							   MLX4_CMD_TIME_CLASS_A,

					mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
						 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;

	spin_lock_irq(mlx4_tlock(dev));

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);

			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	spin_unlock_irq(mlx4_tlock(dev));
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);

			__mlx4_xrcd_free(dev, xrcdn);

	spin_unlock_irq(mlx4_tlock(dev));
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
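/*
 * Teardown order matters here: flow-steering rules and QPs go first
 * because they hold references on CQs, SRQs and MTTs; SRQs and CQs
 * follow for the same reason, and MTTs are freed only near the end,
 * once every reference taken on them has been dropped.
 */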
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];

	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
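	/*
	 * qp_path_mask covers the fields rewritten for every QP (vlan
	 * index, force-vlan and feup flags, sched_queue), while
	 * qp_path_mask_vlan_ctrl adds the RX/TX tag-blocking bits; the
	 * loop below applies the latter only to non-RC QPs.
	 */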
	int port, errors = 0;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
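	/*
	 * Three policies: with the link administratively disabled, every
	 * frame is blocked in both directions; with vlan_id 0, only
	 * tagged traffic is blocked (untagged passes); otherwise - VST
	 * with a real VLAN - the VF must transmit untagged so the
	 * configured tag can be inserted, and it receives only
	 * VLAN-tagged traffic (filtered to the VST VLAN via the vlan
	 * index set below).
	 */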
	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));

			port = (qp->sched_queue >> 6 & 1) + 1;
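			/* bit 6 of sched_queue selects the (0-based) port */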
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));

			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}
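			/*
			 * Bits 3..5 of sched_queue carry the user priority;
			 * the & 0xC7 above clears them before OR-ing in the
			 * VST QoS value.
			 */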
			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err)
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);

		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);
	/* Unregister the previous vlan_id if needed, but only if there were
	 * no errors while updating the QPs.
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);