/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx4_en_setup_tc(dev, tc->tc);
}

#ifdef CONFIG_RFS_ACCEL

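/* Accelerated RFS support.
 *
 * The stack invokes our ndo_rx_flow_steer hook (mlx4_en_filter_rfs)
 * when it wants a flow steered to a specific RX queue.  Each request
 * is recorded as an mlx4_en_filter, hashed by its IPv4 4-tuple, and
 * the hardware attach/detach is deferred to mlx4_en_filter_work() on
 * the mdev workqueue, since programming a flow steering rule issues
 * firmware commands that may sleep.  Stale filters are reaped in
 * mlx4_en_filter_rfs_expire().
 */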
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

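/* Fold the IPv4 4-tuple into a single word (ports in the low bits,
 * XORed with the addresses) and reduce it with hash_long() to an
 * index into priv->filter_hash.
 */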
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

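/* ndo_rx_flow_steer() callback.  Parses the IPv4 4-tuple of @skb and,
 * under filters_lock, either retargets an existing filter to
 * @rxq_index or allocates a new one; the actual hardware rule is
 * programmed asynchronously by mlx4_en_filter_work().
 */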
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

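/* Add a unicast steering rule directing @mac to @qpn.  The mechanism
 * depends on the device's steering mode: B0 steering attaches the QP
 * to a MAC-derived GID, while device-managed flow steering builds an
 * L2 spec and registers it through mlx4_flow_attach().  A0 steering
 * needs no per-MAC rule, so callers skip this path in that mode.
 */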
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

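/* Compute the delta between the currently programmed multicast list
 * (dst) and a freshly cached one (src): entries only in dst are
 * tagged MCLIST_REM, entries only in src are copied into dst and
 * tagged MCLIST_ADD, and entries present in both become MCLIST_NONE.
 * The caller walks dst afterwards and attaches/detaches steering
 * rules accordingly.
 */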
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

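/* The helpers below are called from the rx_mode work under
 * mdev->state_lock.  They switch promiscuous state using whichever
 * mechanism the device supports: device-managed flow steering, B0
 * steering, or A0 steering via mlx4_SET_PORT_qpn_calc().
 */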
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		cq = priv->tx_cq[TX][i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

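/* ndo_tx_timeout() hook: log the state of every stopped TX queue and
 * kick the watchdog task, which performs a full port restart.
 */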
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct rtnl_link_stats64 *
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);

	return stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

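/* Adaptive RX interrupt moderation.  Once per sample interval the
 * per-ring packet rate is computed; below pkt_rate_low the moderation
 * time is pinned to rx_usecs_low, above pkt_rate_high to
 * rx_usecs_high, and in between it is linearly interpolated:
 *
 *	moder_time = (rate - rate_low) * (usecs_high - usecs_low) /
 *		     (rate_high - rate_low) + usecs_low
 */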
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - Run service task for tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state and
	 * report to the system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
	int rr_index = tx_ring_idx;

	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
	tx_ring->recycle_ring = priv->rx_ring[rr_index];
	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
	       TX_XDP, tx_ring_idx, rr_index);
}

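/* Bring the port up: activate RX CQs and rings first, reserve the base
 * QP and RSS steering, then activate TX CQs and rings (XDP TX rings
 * recycle their descriptors back to the paired RX ring), and finally
 * configure the port, attach the broadcast address and schedule the
 * rx_mode work.  The error labels unwind in reverse order.
 */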
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int err = 0;
	int i, t;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		err = mlx4_en_init_affinity_hint(priv, i);
		if (err) {
			en_err(priv, "Failed preparing IRQ affinity hint\n");
			goto cq_err;
		}

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}

		for (j = 0; j < cq->size; j++) {
			struct mlx4_cqe *cqe = NULL;

			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
			      priv->cqe_factor;
			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		}

		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters\n");
			mlx4_en_deactivate_cq(priv, cq);
			mlx4_en_free_affinity_hint(priv, i);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	priv->counter_index =
			mlx4_get_default_counter_index(mdev->dev, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;

		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			/* Configure cq */
			cq = priv->tx_cq[t][i];
			err = mlx4_en_activate_cq(priv, cq, i);
			if (err) {
				en_err(priv, "Failed allocating Tx CQ\n");
				goto tx_err;
			}
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed setting cq moderation parameters\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			en_dbg(DRV, priv,
			       "Resetting index of collapsed CQ:%d to -1\n", i);
			cq->buf->wqe_index = cpu_to_be16(0xffff);

			/* Configure ring */
			tx_ring = priv->tx_ring[t][i];
			err = mlx4_en_activate_tx_ring(priv, tx_ring,
						       cq->mcq.cqn,
						       i / num_tx_rings_p_up);
			if (err) {
				en_err(priv, "Failed allocating Tx ring\n");
				mlx4_en_deactivate_cq(priv, cq);
				goto tx_err;
			}
			if (t != TX_XDP) {
				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
				tx_ring->recycle_ring = NULL;
			} else {
				mlx4_en_init_recycle_ring(priv, i);
			}

			/* Arm CQ for TX completions */
			mlx4_en_arm_cq(priv, cq);

			/* Set initial ownership of all Tx TXBBs to SW (1) */
			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
		}
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Set Unicast and VXLAN steering rules */
	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
	    mlx4_en_set_rss_steer_rules(priv))
		mlx4_warn(mdev, "Failed setting steering rules\n");

	/* Attach rx QP to broadcast address */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		udp_tunnel_get_rx_info(dev);

	priv->port_up = true;

	/* Process all completions if exist to prevent
	 * the queues freezing if they are full
	 */
	for (i = 0; i < priv->rx_ring_num; i++)
		napi_schedule(&priv->rx_cq[i]->napi);

	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	if (t == MLX4_EN_NUM_TX_TYPES) {
		t--;
		i = priv->tx_ring_num[t];
	}
	while (t >= 0) {
		while (i--) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
		if (!t--)
			break;
		i = priv->tx_ring_num[t];
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--) {
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
		mlx4_en_free_affinity_hint(priv, rx_index);
	}
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i, t;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	eth_broadcast_addr(&mc_list[10]);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
		if (mclist->tunnel_reg_id)
			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
	}
	msleep(10);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
		for (i = 0; i < priv->tx_ring_num[t]; i++)
			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		mlx4_en_delete_rss_steer_rules(priv);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		napi_synchronize(&cq->napi);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);

		mlx4_en_free_affinity_hint(priv, i);
	}
}

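/* Watchdog worker, queued from mlx4_en_tx_timeout(): restart the port
 * with a full stop/start cycle under mdev->state_lock.
 */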
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	rtnl_lock();
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
	rtnl_unlock();
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring **tx_ring;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
	memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
	memset(&priv->rx_priority_flowstats, 0,
	       sizeof(priv->rx_priority_flowstats));
	memset(&priv->tx_priority_flowstats, 0,
	       sizeof(priv->tx_priority_flowstats));
	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));

	tx_ring = priv->tx_ring[TX];
	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		tx_ring[i]->bytes = 0;
		tx_ring[i]->packets = 0;
		tx_ring[i]->tx_csum = 0;
		tx_ring[i]->tx_dropped = 0;
		tx_ring[i]->queue_stopped = 0;
		tx_ring[i]->wake_queue = 0;
		tx_ring[i]->tso_packets = 0;
		tx_ring[i]->xmit_more = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
		priv->rx_ring[i]->csum_complete = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i, t;

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t] && priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t] && priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i, t;
	int node;

	/* Create tx Rings */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			node = cpu_to_node(i % num_online_cpus());
			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
					      prof->tx_ring_size, i, t, node))
				goto err;

			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
						   prof->tx_ring_size,
						   TXBB_SIZE, node, i))
				goto err;
		}
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			if (priv->tx_ring[t][i])
				mlx4_en_destroy_tx_ring(priv,
							&priv->tx_ring[t][i]);
			if (priv->tx_cq[t][i])
				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
		}
	}
	return -ENOMEM;
}

static void mlx4_en_shutdown(struct net_device *dev)
{
	rtnl_lock();
	netif_device_detach(dev);
	mlx4_en_close(dev);
	rtnl_unlock();
}

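/* Ring reconfiguration is done in two phases so that a failed
 * allocation never leaves the netdev without resources:
 * mlx4_en_try_alloc_resources() builds a complete new set on a
 * temporary priv (mlx4_en_copy_priv() + mlx4_en_alloc_resources()),
 * and only after that succeeds does mlx4_en_safe_replace_resources()
 * free the old set and move the new one in.
 */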
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
			     struct mlx4_en_priv *src,
			     struct mlx4_en_port_profile *prof)
{
	int t;

	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
	dst->rx_ring_num = prof->rx_ring_num;
	dst->flags = prof->flags;
	dst->mdev = src->mdev;
	dst->port = src->port;
	dst->dev = src->dev;
	dst->prof = prof;
	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!dst->tx_ring_num[t])
			continue;

		dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
					  MAX_TX_RINGS, GFP_KERNEL);
		if (!dst->tx_ring[t])
			goto err_free_tx;

		dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
					MAX_TX_RINGS, GFP_KERNEL);
		if (!dst->tx_cq[t]) {
			kfree(dst->tx_ring[t]);
			goto err_free_tx;
		}
	}

	return 0;

err_free_tx:
	while (t--) {
		kfree(dst->tx_ring[t]);
		kfree(dst->tx_cq[t]);
	}
	return -ENOMEM;
}

static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
				struct mlx4_en_priv *src)
{
	int t;

	memcpy(dst->rx_ring, src->rx_ring,
	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
	memcpy(dst->rx_cq, src->rx_cq,
	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
	       sizeof(dst->hwtstamp_config));
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		dst->tx_ring_num[t] = src->tx_ring_num[t];
		dst->tx_ring[t] = src->tx_ring[t];
		dst->tx_cq[t] = src->tx_cq[t];
	}
	dst->rx_ring_num = src->rx_ring_num;
	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}

int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
				struct mlx4_en_priv *tmp,
				struct mlx4_en_port_profile *prof)
{
	int t;

	mlx4_en_copy_priv(tmp, priv, prof);

	if (mlx4_en_alloc_resources(tmp)) {
		en_warn(priv,
			"%s: Resource allocation failed, using previous configuration\n",
			__func__);
		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
			kfree(tmp->tx_ring[t]);
			kfree(tmp->tx_cq[t]);
		}
		return -ENOMEM;
	}
	return 0;
}

void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
				    struct mlx4_en_priv *tmp)
{
	mlx4_en_free_resources(priv);
	mlx4_en_update_priv(priv, tmp);
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	bool shutdown = mdev->dev->persist->interface_state &
					    MLX4_INTERFACE_STATE_SHUTDOWN;
	int t;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
							      priv->port));
		if (shutdown)
			mlx4_en_shutdown(dev);
		else
			unregister_netdev(dev);
	}

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mdev->upper[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif

	mlx4_en_free_resources(priv);

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		kfree(priv->tx_ring[t]);
		kfree(priv->tx_cq[t]);
	}

	if (!shutdown)
		free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if (priv->tx_ring_num[TX_XDP] && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
		en_err(priv, "MTU size:%d requires frags but XDP running\n",
		       new_mtu);
		return -EOPNOTSUPP;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

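/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config, collapse
 * all supported RX PTP filters to HWTSTAMP_FILTER_ALL (the hardware
 * timestamps either every packet or none), and apply the result via
 * mlx4_en_reset_config().
 */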
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_reset_config(dev, config, dev->features)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
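/* For reference, a minimal userspace sketch of driving the handler above
 * through the standard SIOCSHWTSTAMP ioctl (kernel UAPI, not part of this
 * driver):
 *
 *	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
 *				       .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT };
 *	struct ifreq ifr = {0};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Since the hardware can only timestamp all packets or none, the handler
 * upgrades any specific PTP filter to HWTSTAMP_FILTER_ALL and reports that
 * back in cfg.rx_filter, as the hwtstamp API permits.
 */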
static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
	 * enable/disable make sure S-TAG flag is always in same state as
	 * C-TAG.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
		features |= NETIF_F_HW_VLAN_STAG_RX;
	else
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

	return features;
}
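/* Note that ndo_fix_features runs before ndo_set_features and may only
 * massage the requested mask, never touch hardware; it keeps the S-TAG RX
 * strip bit in lockstep with the C-TAG bit because the device cannot
 * toggle the two independently.  The actual toggling happens in
 * mlx4_en_set_features() below.
 */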
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	bool reset = false;
	int ret = 0;

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
		en_info(priv, "Turn %s RX-FCS\n",
			(features & NETIF_F_RXFCS) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
		u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;

		en_info(priv, "Turn %s RX-ALL\n",
			ignore_fcs_value ? "ON" : "OFF");
		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
					      priv->port, ignore_fcs_value);
		if (ret)
			return ret;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		en_info(priv, "Turn %s RX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
		reset = true;
	}

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
		en_info(priv, "Turn %s TX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");

	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
		en_info(priv, "Turn %s loopback\n",
			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
		mlx4_en_update_loopback_state(netdev, features);
	}

	if (reset) {
		ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
					   features);
		if (ret)
			return ret;
	}

	return 0;
}
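/* These flags map onto standard ethtool offload toggles, e.g. (sketch):
 *
 *	ethtool -K eth0 rx-fcs on	// NETIF_F_RXFCS, full reset path
 *	ethtool -K eth0 rx-all on	// NETIF_F_RXALL, firmware command only
 *	ethtool -K eth0 rxvlan off	// NETIF_F_HW_VLAN_CTAG_RX, full reset
 *
 * Only the features that change ring or port state (RX-FCS, RX VLAN strip)
 * force the reconfiguration path through mlx4_en_reset_config(); the rest
 * either issue a single firmware command or are purely informational.
 */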
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;
	u64 mac_u64 = mlx4_mac_to_u64(mac);

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			       __be16 vlan_proto)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
				vlan_proto);
}

static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			       int max_tx_rate)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
				max_tx_rate);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}

static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
				struct ifla_vf_stats *vf_stats)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}
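/* The ndo_{set,get}_vf_* handlers above are thin wrappers that forward to
 * mlx4_core with the port number filled in; they back the standard
 * iproute2 VF controls, e.g. (sketch):
 *
 *	ip link set eth0 vf 0 mac 00:11:22:33:44:55
 *	ip link set eth0 vf 0 vlan 100 qos 3
 *	ip link set eth0 vf 0 max_tx_rate 1000
 */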
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_item_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}
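/* The loop stores the 64-bit id most-significant byte first, so the id
 * userspace sees (e.g. via "ip link show") reads in the same order as the
 * firmware-reported value: an illustrative id of 0x0123456789abcdef would
 * be exported as 01:23:45:67:89:ab:cd:ef.
 */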
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_add_task);

	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
	if (ret)
		goto out;

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 1);
out:
	if (ret) {
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
		return;
	}

	/* set offloads */
	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				      NETIF_F_RXCSUM |
				      NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_GSO_UDP_TUNNEL |
				      NETIF_F_GSO_UDP_TUNNEL_CSUM |
				      NETIF_F_GSO_PARTIAL;
}

static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_del_task);
	/* unset offloads */
	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_TSO6 |
					NETIF_F_GSO_UDP_TUNNEL |
					NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_PARTIAL);

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 0);
	if (ret)
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);

	priv->vxlan_port = 0;
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 port = ti->port;
	__be16 current_port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (ti->sa_family != AF_INET)
		return;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	current_port = priv->vxlan_port;
	if (current_port && current_port != port) {
		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
			ntohs(current_port), ntohs(port));
		return;
	}

	priv->vxlan_port = port;
	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
}

static void mlx4_en_del_vxlan_port(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 port = ti->port;
	__be16 current_port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (ti->sa_family != AF_INET)
		return;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	current_port = priv->vxlan_port;
	if (current_port != port) {
		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
		return;
	}

	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
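/* Both tunnel-port handlers above only record the port and defer to the
 * workqueue: they can be invoked from contexts that may not sleep, while
 * the actual reconfiguration (mlx4_config_vxlan_port() /
 * mlx4_SET_PORT_VXLAN()) issues blocking firmware commands and so must run
 * from process context.
 */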
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
	 * support inner IPv6 checksums and segmentation so we need to
	 * strip that feature if this is an IPv6 encapsulated frame.
	 */
	if (skb->encapsulation &&
	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct mlx4_en_priv *priv = netdev_priv(dev);

		if (!priv->vxlan_port ||
		    (ip_hdr(skb)->version != 4) ||
		    (udp_hdr(skb)->dest != priv->vxlan_port))
			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	return features;
}
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
	struct mlx4_update_qp_params params;
	int err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
		return -EOPNOTSUPP;

	/* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */
	if (maxrate >> 12) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
		params.rate_val  = maxrate / 1000;
	} else if (maxrate) {
		params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
		params.rate_val  = maxrate;
	} else { /* zero serves to revoke the QP rate-limitation */
		params.rate_unit = 0;
		params.rate_val  = 0;
	}

	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
			     &params);
	return err;
}
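/* This hook backs the standard per-queue tx_maxrate sysfs attribute; a
 * sketch of capping TX queue 3 at 500 Mb/s from userspace:
 *
 *	echo 500 > /sys/class/net/eth0/queues/tx-3/tx_maxrate
 *
 * The 12-bit check above exists because the firmware rate-limit value
 * field is 12 bits wide per unit, so rates of 4096 Mb/s and above are
 * re-expressed in Gb/s.
 */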
static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct bpf_prog *old_prog;
	struct mlx4_en_priv *tmp;
	int tx_changed = 0;
	int xdp_ring_num;
	int port_up = 0;
	int err;
	int i;

	xdp_ring_num = prog ? priv->rx_ring_num : 0;

	/* No need to reconfigure buffers when simply swapping the
	 * program for a new one.
	 */
	if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
		if (prog) {
			prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
			if (IS_ERR(prog))
				return PTR_ERR(prog);
		}
		mutex_lock(&mdev->state_lock);
		for (i = 0; i < priv->rx_ring_num; i++) {
			old_prog = rcu_dereference_protected(
					priv->rx_ring[i]->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
			rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
			if (old_prog)
				bpf_prog_put(old_prog);
		}
		mutex_unlock(&mdev->state_lock);
		return 0;
	}

	if (priv->num_frags > 1) {
		en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
		return -EOPNOTSUPP;
	}

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (prog) {
		prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto out;
		}
	}

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;

	if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
		tx_changed = 1;
		new_prof.tx_ring_num[TX] =
			MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
	}

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
	if (err) {
		if (prog)
			bpf_prog_sub(prog, priv->rx_ring_num - 1);
		goto unlock_out;
	}

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (tx_changed)
		netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);

	for (i = 0; i < priv->rx_ring_num; i++) {
		old_prog = rcu_dereference_protected(
				priv->rx_ring[i]->xdp_prog,
				lockdep_is_held(&mdev->state_lock));
		rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port %d for XDP change\n",
			       priv->port);
			queue_work(mdev->workqueue, &priv->watchdog_task);
		}
	}

unlock_out:
	mutex_unlock(&mdev->state_lock);
out:
	kfree(tmp);
	return err;
}
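/* In the swap-only path above the RX rings keep running: each ring's
 * xdp_prog is an RCU-protected pointer that the datapath reads under
 * rcu_read_lock(), so rcu_assign_pointer() plus bpf_prog_put() retires the
 * old program safely without quiescing traffic.  The full reconfiguration
 * path is only needed when XDP is being enabled or disabled, because that
 * changes the number of dedicated XDP TX rings and the RX buffer layout.
 */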
static bool mlx4_xdp_attached(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return !!priv->tx_ring_num[TX_XDP];
}

static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx4_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = mlx4_xdp_attached(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_udp_tunnel_add	= mlx4_en_add_vxlan_port,
	.ndo_udp_tunnel_del	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_xdp		= mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats64	= mlx4_en_get_stats64,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_rate	= mlx4_en_set_vf_rate,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_stats	= mlx4_en_get_vf_stats,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_fix_features	= mlx4_en_fix_features,
	.ndo_setup_tc		= __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
	.ndo_udp_tunnel_add	= mlx4_en_add_vxlan_port,
	.ndo_udp_tunnel_del	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
	.ndo_set_tx_maxrate	= mlx4_en_set_tx_maxrate,
	.ndo_xdp		= mlx4_xdp,
};
struct mlx4_en_bond {
	struct work_struct work;
	struct mlx4_en_priv *priv;
	int is_bonded;
	struct mlx4_port_map port_map;
};
static void mlx4_en_bond_work(struct work_struct *work)
{
	struct mlx4_en_bond *bond = container_of(work,
						 struct mlx4_en_bond,
						 work);
	int err = 0;
	struct mlx4_dev *dev = bond->priv->mdev->dev;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(dev)) {
			err = mlx4_bond(dev);
			if (err)
				en_err(bond->priv, "Fail to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(dev, &bond->port_map);
			if (err)
				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
				       bond->port_map.port1,
				       bond->port_map.port2,
				       err);
		}
	} else if (mlx4_is_bonded(dev)) {
		err = mlx4_unbond(dev);
		if (err)
			en_err(bond->priv, "Fail to unbond device\n");
	}
	dev_put(bond->priv->dev);
	kfree(bond);
}
static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
				   u8 v2p_p1, u8 v2p_p2)
{
	struct mlx4_en_bond *bond = NULL;

	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_en_bond_work);
	bond->priv = priv;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	dev_hold(priv->dev);
	queue_work(priv->mdev->workqueue, &bond->work);
	return 0;
}
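/* The work item is allocated with GFP_ATOMIC since the caller sits on the
 * netdev notifier path, and the bond/unbond work itself - which issues
 * blocking firmware commands - is pushed to the driver workqueue rather
 * than running inline.  The dev_hold()/dev_put() pair keeps the netdev
 * alive until mlx4_en_bond_work() has finished with it.
 */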
int mlx4_en_netdev_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port = 0;
	struct mlx4_en_dev *mdev;
	struct mlx4_dev *dev;
	int i, num_eth_ports = 0;
	bool do_bond = true;
	struct mlx4_en_priv *priv;
	u8 v2p_port1 = 0;
	u8 v2p_port2 = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	mdev = container_of(this, struct mlx4_en_dev, nb);
	dev = mdev->dev;

	/* Go into this mode only when two network devices set on two ports
	 * of the same mlx4 device are slaves of the same bonding master
	 */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		++num_eth_ports;
		if (!port && (mdev->pndev[i] == ndev))
			port = i;
		mdev->upper[i] = mdev->pndev[i] ?
			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
		/* condition not met: network device is a slave */
		if (!mdev->upper[i])
			do_bond = false;
		if (num_eth_ports < 2)
			continue;
		/* condition not met: same master */
		if (mdev->upper[i] != mdev->upper[i-1])
			do_bond = false;
	}
	/* condition not met: 2 slaves */
	do_bond = (num_eth_ports == 2) ? do_bond : false;

	/* handle only events that come with enough info */
	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
		return NOTIFY_DONE;

	priv = netdev_priv(ndev);
	if (do_bond) {
		struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct netdev_bonding_info *bonding_info =
			&notifier_info->bonding_info;

		/* required mode 1, 2 or 4 */
		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
			do_bond = false;

		/* require exactly 2 slaves */
		if (bonding_info->master.num_slaves != 2)
			do_bond = false;

		/* calc v2p */
		if (do_bond) {
			if (bonding_info->master.bond_mode ==
			    BOND_MODE_ACTIVEBACKUP) {
				/* in active-backup mode virtual ports are
				 * mapped to the physical port of the active
				 * slave */
				if (bonding_info->slave.state ==
				    BOND_STATE_BACKUP) {
					if (port == 1) {
						v2p_port1 = 2;
						v2p_port2 = 2;
					} else {
						v2p_port1 = 1;
						v2p_port2 = 1;
					}
				} else { /* BOND_STATE_ACTIVE */
					if (port == 1) {
						v2p_port1 = 1;
						v2p_port2 = 1;
					} else {
						v2p_port1 = 2;
						v2p_port2 = 2;
					}
				}
			} else { /* Active-Active */
				/* in active-active mode a virtual port is
				 * mapped to the native physical port if and only
				 * if the physical port is up */
				__s8 link = bonding_info->slave.link;

				if (port == 1)
					v2p_port2 = 2;
				else
					v2p_port1 = 1;
				if ((link == BOND_LINK_UP) ||
				    (link == BOND_LINK_FAIL)) {
					if (port == 1)
						v2p_port1 = 1;
					else
						v2p_port2 = 2;
				} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
					if (port == 1)
						v2p_port1 = 2;
					else
						v2p_port2 = 1;
				}
			}
		}
	}

	mlx4_en_queue_bond_work(priv, do_bond,
				v2p_port1, v2p_port2);

	return NOTIFY_DONE;
}
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
				     struct mlx4_en_stats_bitmap *stats_bitmap,
				     u8 rx_ppp, u8 rx_pause,
				     u8 tx_ppp, u8 tx_pause)
{
	int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;

	if (!mlx4_is_slave(dev) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
		mutex_lock(&stats_bitmap->mutex);
		bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);

		if (rx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_RX);
		last_i += NUM_FLOW_PRIORITY_STATS_RX;

		if (rx_pause && !(rx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_RX);
		last_i += NUM_FLOW_STATS_RX;

		if (tx_ppp)
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_PRIORITY_STATS_TX);
		last_i += NUM_FLOW_PRIORITY_STATS_TX;

		if (tx_pause && !(tx_ppp))
			bitmap_set(stats_bitmap->bitmap, last_i,
				   NUM_FLOW_STATS_TX);
		last_i += NUM_FLOW_STATS_TX;

		mutex_unlock(&stats_bitmap->mutex);
	}
}
void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
			      struct mlx4_en_stats_bitmap *stats_bitmap,
			      u8 rx_ppp, u8 rx_pause,
			      u8 tx_ppp, u8 tx_pause)
{
	int last_i = 0;

	mutex_init(&stats_bitmap->mutex);
	bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);

	if (mlx4_is_slave(dev)) {
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_packets), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
		bitmap_set(stats_bitmap->bitmap, last_i +
			   MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
	} else {
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
	}
	last_i += NUM_MAIN_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
	last_i += NUM_PORT_STATS;

	if (mlx4_is_master(dev))
		bitmap_set(stats_bitmap->bitmap, last_i,
			   NUM_PF_STATS);
	last_i += NUM_PF_STATS;

	mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
					rx_ppp, rx_pause,
					tx_ppp, tx_pause);
	last_i += NUM_FLOW_STATS;

	if (!mlx4_is_slave(dev))
		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
	last_i += NUM_PKT_STATS;

	bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
	last_i += NUM_XDP_STATS;
}
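/* The resulting bitmap selects which entries of the flat statistics array
 * "ethtool -S" exposes, laid out in this fixed order:
 *
 *	MAIN | PORT | PF | FLOW (priority + pause, RX then TX) | PKT | XDP
 *
 * Slaves (VFs) only see the software netdev counters within MAIN, the
 * master additionally sees the PF and per-packet statistics, and the FLOW
 * block is refreshed at runtime by mlx4_en_update_pfc_stats_bitmap() above
 * whenever pause/PFC settings change.
 */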
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i, t;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
	dev->dev_port = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		priv->tx_ring_num[t] = prof->tx_ring_num[t];
		if (!priv->tx_ring_num[t])
			continue;

		priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
					   MAX_TX_RINGS, GFP_KERNEL);
		if (!priv->tx_ring[t]) {
			err = -ENOMEM;
			goto err_free_tx;
		}
		priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
					 MAX_TX_RINGS, GFP_KERNEL);
		if (!priv->tx_cq[t]) {
			kfree(priv->tx_ring[t]);
			err = -ENOMEM;
			goto out;
		}
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
			DCB_CAP_DCBX_VER_IEEE;
		priv->flags |= MLX4_EN_DCB_ENABLED;
		priv->cee_config.pfc_state = false;

		for (i = 0; i < MLX4_EN_NUM_UP; i++)
			priv->cee_config.dcb_pfc[i] = pfc_disabled;

		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	} else if (mlx4_is_slave(priv->mdev->dev) &&
		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
		/* Random MAC was assigned in mlx4_slave_cap
		 * in mlx4_core module
		 */
		dev->addr_assign_type |= NET_ADDR_RANDOM;
		en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
			NETIF_F_HW_VLAN_STAG_FILTER;
		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
	}

	if (mlx4_is_slave(mdev->dev)) {
		bool vlan_offload_disabled;
		int phv;

		err = get_phv_bit(mdev->dev, port, &phv);
		if (!err && phv) {
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		}
		err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
							&vlan_offload_disabled);
		if (!err && vlan_offload_disabled) {
			dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					      NETIF_F_HW_VLAN_CTAG_RX |
					      NETIF_F_HW_VLAN_STAG_TX |
					      NETIF_F_HW_VLAN_STAG_RX);
			dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					   NETIF_F_HW_VLAN_CTAG_RX |
					   NETIF_F_HW_VLAN_STAG_TX |
					   NETIF_F_HW_VLAN_STAG_RX);
		}
	} else {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
		    !(mdev->dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
	}

	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
		dev->hw_features |= NETIF_F_RXFCS;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
		dev->hw_features |= NETIF_F_RXALL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	/* Setting a default hash function value */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_PARTIAL;
		dev->features    |= NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_PARTIAL;
		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	/* MTU range: 46 - hw-specific max */
	dev->min_mtu = MLX4_EN_MIN_MTU;
	dev->max_mtu = priv->max_mtu;

	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	/* Launch the service task */
	queue_delayed_work(mdev->workqueue, &priv->service_task,
			   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
				 mdev->profile.prof[priv->port].rx_ppp,
				 mdev->profile.prof[priv->port].rx_pause,
				 mdev->profile.prof[priv->port].tx_ppp,
				 mdev->profile.prof[priv->port].tx_pause);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}

	priv->registered = 1;
	devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
				  dev);

	return 0;

err_free_tx:
	while (t--) {
		kfree(priv->tx_ring[t]);
		kfree(priv->tx_cq[t]);
	}
out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);

	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
		ts_config.rx_filter,
		!!(features & NETIF_F_HW_VLAN_CTAG_RX));

	mlx4_en_safe_replace_resources(priv, tmp);

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
		if (features & NETIF_F_RXFCS)
			dev->features |= NETIF_F_RXFCS;
		else
			dev->features &= ~NETIF_F_RXFCS;
	}

	/* RX vlan offload and RX time-stamping can't co-exist!
	 * Regardless of the caller's choice,
	 * turn off RX vlan offload in case time-stamping is ON
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	if (!err)
		netdev_features_change(dev);
	return err;
}