2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
39 #include <asm-generic/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
44 static const char rocker_driver_name[] = "rocker";
46 static const struct pci_device_id rocker_pci_id_table[] = {
47 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
51 struct rocker_flow_tbl_key {
53 enum rocker_of_dpa_table_id tbl_id;
58 enum rocker_of_dpa_table_id goto_tbl;
64 enum rocker_of_dpa_table_id goto_tbl;
73 u8 eth_dst_mask[ETH_ALEN];
76 enum rocker_of_dpa_table_id goto_tbl;
83 enum rocker_of_dpa_table_id goto_tbl;
88 u8 eth_dst_mask[ETH_ALEN];
93 enum rocker_of_dpa_table_id goto_tbl;
100 u8 eth_src[ETH_ALEN];
101 u8 eth_src_mask[ETH_ALEN];
102 u8 eth_dst[ETH_ALEN];
103 u8 eth_dst_mask[ETH_ALEN];
116 struct rocker_flow_tbl_entry {
117 struct hlist_node entry;
120 struct rocker_flow_tbl_key key;
122 u32 key_crc32; /* key */
125 struct rocker_group_tbl_entry {
126 struct hlist_node entry;
128 u32 group_id; /* key */
136 u8 eth_src[ETH_ALEN];
137 u8 eth_dst[ETH_ALEN];
142 u8 eth_src[ETH_ALEN];
143 u8 eth_dst[ETH_ALEN];
151 struct rocker_fdb_tbl_entry {
152 struct hlist_node entry;
153 u32 key_crc32; /* key */
155 struct rocker_fdb_tbl_key {
162 struct rocker_internal_vlan_tbl_entry {
163 struct hlist_node entry;
164 int ifindex; /* key */
169 struct rocker_neigh_tbl_entry {
170 struct hlist_node entry;
171 __be32 ip_addr; /* key */
172 struct net_device *dev;
175 u8 eth_dst[ETH_ALEN];
179 struct rocker_desc_info {
180 char *data; /* mapped */
183 struct rocker_desc *desc;
187 struct rocker_dma_ring_info {
191 struct rocker_desc *desc; /* mapped */
193 struct rocker_desc_info *desc_info;
200 ROCKER_CTRL_LINK_LOCAL_MCAST,
201 ROCKER_CTRL_LOCAL_ARP,
202 ROCKER_CTRL_IPV4_MCAST,
203 ROCKER_CTRL_IPV6_MCAST,
204 ROCKER_CTRL_DFLT_BRIDGING,
208 #define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
209 #define ROCKER_N_INTERNAL_VLANS 255
210 #define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
211 #define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
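/* With ROCKER_INTERNAL_VLAN_ID_BASE of 0x0f00 and 255 internal VLANs,
 * the internal VLAN IDs occupy (roughly) the VID range 0x0f00-0x0fff;
 * rocker_vlan_id_is_internal() below simply tests membership in that
 * range.
 */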
214 struct net_device *dev;
215 struct net_device *bridge_dev;
216 struct rocker *rocker;
217 unsigned int port_number;
219 __be16 internal_vlan_id;
222 bool ctrls[ROCKER_CTRL_MAX];
223 unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
224 struct napi_struct napi_tx;
225 struct napi_struct napi_rx;
226 struct rocker_dma_ring_info tx_ring;
227 struct rocker_dma_ring_info rx_ring;
228 struct list_head trans_mem;
232 struct pci_dev *pdev;
234 struct msix_entry *msix_entries;
235 unsigned int port_count;
236 struct rocker_port **ports;
240 spinlock_t cmd_ring_lock; /* for cmd ring accesses */
241 struct rocker_dma_ring_info cmd_ring;
242 struct rocker_dma_ring_info event_ring;
243 DECLARE_HASHTABLE(flow_tbl, 16);
244 spinlock_t flow_tbl_lock; /* for flow tbl accesses */
245 u64 flow_tbl_next_cookie;
246 DECLARE_HASHTABLE(group_tbl, 16);
247 spinlock_t group_tbl_lock; /* for group tbl accesses */
248 DECLARE_HASHTABLE(fdb_tbl, 16);
249 spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
250 unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
251 DECLARE_HASHTABLE(internal_vlan_tbl, 8);
252 spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
253 DECLARE_HASHTABLE(neigh_tbl, 16);
254 spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
255 u32 neigh_tbl_next_index;
258 static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
259 static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
260 static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
261 static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
262 static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
263 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
264 static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
265 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
266 static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
268 /* Rocker priority levels for flow table entries. A higher-priority
269 * match takes precedence over a lower-priority match.
273 ROCKER_PRIORITY_UNKNOWN = 0,
274 ROCKER_PRIORITY_IG_PORT = 1,
275 ROCKER_PRIORITY_VLAN = 1,
276 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
277 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
278 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
279 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
280 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
281 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
282 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
283 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
284 ROCKER_PRIORITY_ACL_CTRL = 3,
285 ROCKER_PRIORITY_ACL_NORMAL = 2,
286 ROCKER_PRIORITY_ACL_DFLT = 1,
289 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
291 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
293 u16 _vlan_id = ntohs(vlan_id);
295 return (_vlan_id >= start && _vlan_id <= end);
298 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
299 u16 vid, bool *pop_vlan)
305 vlan_id = htons(vid);
307 vlan_id = rocker_port->internal_vlan_id;
315 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
318 if (rocker_vlan_id_is_internal(vlan_id))
321 return ntohs(vlan_id);
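/* Mapping sketch: a VID of 0 (untagged traffic) is carried internally as
 * the port's internal VLAN ID, and the reverse lookup above turns any
 * internal VLAN ID back into VID 0, so only real 802.1Q tags are exposed
 * as VIDs.
 */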
324 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
326 return !!rocker_port->bridge_dev;
329 static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
330 enum switchdev_trans trans, size_t size)
332 struct list_head *elem = NULL;
334 /* If in transaction prepare phase, allocate the memory
335 * and enqueue it on a per-port list. If in transaction
336 * commit phase, dequeue the memory from the per-port list
337 * rather than re-allocating the memory. The idea is the
338 * driver code paths for prepare and commit are identical
339 * so the memory allocated in the prepare phase is the
340 * memory used in the commit phase.
344 case SWITCHDEV_TRANS_PREPARE:
345 elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
348 list_add_tail(elem, &rocker_port->trans_mem);
350 case SWITCHDEV_TRANS_COMMIT:
351 BUG_ON(list_empty(&rocker_port->trans_mem));
352 elem = rocker_port->trans_mem.next;
355 case SWITCHDEV_TRANS_NONE:
356 elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
358 INIT_LIST_HEAD(elem);
364 return elem ? elem + 1 : NULL;
367 static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
368 enum switchdev_trans trans, size_t size)
370 return __rocker_port_mem_alloc(rocker_port, trans, size);
373 static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
374 enum switchdev_trans trans, size_t n,
377 return __rocker_port_mem_alloc(rocker_port, trans, n * size);
380 static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
382 struct list_head *elem;
384 /* Frees are ignored if in transaction prepare phase. The
385 * memory remains on the per-port list until freed in the
389 if (trans == SWITCHDEV_TRANS_PREPARE)
392 elem = (struct list_head *)mem - 1;
393 BUG_ON(!list_empty(elem));
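/* Usage sketch (illustrative, not a verbatim caller): switchdev runs the
 * same op once with SWITCHDEV_TRANS_PREPARE and again with
 * SWITCHDEV_TRANS_COMMIT, so a caller may allocate unconditionally and
 * the commit pass gets back the memory set aside by the prepare pass:
 *
 *	entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;
 *	...
 *	rocker_port_kfree(trans, entry);   (a no-op during prepare)
 */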
398 wait_queue_head_t wait;
402 static void rocker_wait_reset(struct rocker_wait *wait)
407 static void rocker_wait_init(struct rocker_wait *wait)
409 init_waitqueue_head(&wait->wait);
410 rocker_wait_reset(wait);
413 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
414 enum switchdev_trans trans)
416 struct rocker_wait *wait;
418 wait = rocker_port_kzalloc(rocker_port, trans, sizeof(*wait));
421 rocker_wait_init(wait);
425 static void rocker_wait_destroy(enum switchdev_trans trans,
426 struct rocker_wait *wait)
428 rocker_port_kfree(trans, wait);
431 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
432 unsigned long timeout)
434 wait_event_timeout(wait->wait, wait->done, timeout);
440 static void rocker_wait_wake_up(struct rocker_wait *wait)
443 wake_up(&wait->wait);
446 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
448 return rocker->msix_entries[vector].vector;
451 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
453 return rocker_msix_vector(rocker_port->rocker,
454 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
457 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
459 return rocker_msix_vector(rocker_port->rocker,
460 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
463 #define rocker_write32(rocker, reg, val) \
464 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
465 #define rocker_read32(rocker, reg) \
466 readl((rocker)->hw_addr + (ROCKER_ ## reg))
467 #define rocker_write64(rocker, reg, val) \
468 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
469 #define rocker_read64(rocker, reg) \
470 readq((rocker)->hw_addr + (ROCKER_ ## reg))
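/* For example, rocker_write32(rocker, TEST_REG, rnd) expands to
 * writel(rnd, rocker->hw_addr + ROCKER_TEST_REG); the register name is
 * token-pasted onto the ROCKER_ prefix to pick up the register offset.
 */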
472 /*****************************
473 * HW basic testing functions
474 *****************************/
476 static int rocker_reg_test(const struct rocker *rocker)
478 const struct pci_dev *pdev = rocker->pdev;
484 rocker_write32(rocker, TEST_REG, rnd);
485 test_reg = rocker_read32(rocker, TEST_REG);
486 if (test_reg != rnd * 2) {
487 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
494 rnd |= prandom_u32();
495 rocker_write64(rocker, TEST_REG64, rnd);
496 test_reg = rocker_read64(rocker, TEST_REG64);
497 if (test_reg != rnd * 2) {
498 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
506 static int rocker_dma_test_one(const struct rocker *rocker,
507 struct rocker_wait *wait, u32 test_type,
508 dma_addr_t dma_handle, const unsigned char *buf,
509 const unsigned char *expect, size_t size)
511 const struct pci_dev *pdev = rocker->pdev;
514 rocker_wait_reset(wait);
515 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
517 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
518 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
522 for (i = 0; i < size; i++) {
523 if (buf[i] != expect[i]) {
524 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
525 buf[i], i, expect[i]);
532 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
533 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
535 static int rocker_dma_test_offset(const struct rocker *rocker,
536 struct rocker_wait *wait, int offset)
538 struct pci_dev *pdev = rocker->pdev;
539 unsigned char *alloc;
541 unsigned char *expect;
542 dma_addr_t dma_handle;
546 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
547 GFP_KERNEL | GFP_DMA);
550 buf = alloc + offset;
551 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
553 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
554 PCI_DMA_BIDIRECTIONAL);
555 if (pci_dma_mapping_error(pdev, dma_handle)) {
560 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
561 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
563 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
564 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
565 dma_handle, buf, expect,
566 ROCKER_TEST_DMA_BUF_SIZE);
570 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
571 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
572 dma_handle, buf, expect,
573 ROCKER_TEST_DMA_BUF_SIZE);
577 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
578 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
580 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
581 dma_handle, buf, expect,
582 ROCKER_TEST_DMA_BUF_SIZE);
587 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
588 PCI_DMA_BIDIRECTIONAL);
595 static int rocker_dma_test(const struct rocker *rocker,
596 struct rocker_wait *wait)
601 for (i = 0; i < 8; i++) {
602 err = rocker_dma_test_offset(rocker, wait, i);
609 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
611 struct rocker_wait *wait = dev_id;
613 rocker_wait_wake_up(wait);
618 static int rocker_basic_hw_test(const struct rocker *rocker)
620 const struct pci_dev *pdev = rocker->pdev;
621 struct rocker_wait wait;
624 err = rocker_reg_test(rocker);
626 dev_err(&pdev->dev, "reg test failed\n");
630 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
631 rocker_test_irq_handler, 0,
632 rocker_driver_name, &wait);
634 dev_err(&pdev->dev, "cannot assign test irq\n");
638 rocker_wait_init(&wait);
639 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
641 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
642 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
647 err = rocker_dma_test(rocker, &wait);
649 dev_err(&pdev->dev, "dma test failed\n");
652 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
660 #define ROCKER_TLV_ALIGNTO 8U
661 #define ROCKER_TLV_ALIGN(len) \
662 (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
663 #define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
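/* For example, ROCKER_TLV_ALIGN(5) == 8 and ROCKER_TLV_ALIGN(8) == 8, so
 * every attribute (header plus payload) starts on an 8-byte boundary.
 */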
665 /* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
666 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
667 * | Header | Pad | Payload | Pad |
668 * | (struct rocker_tlv) | ing | | ing |
669 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
670 * <--------------------------- tlv->len -------------------------->
673 static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
676 int totlen = ROCKER_TLV_ALIGN(tlv->len);
678 *remaining -= totlen;
679 return (struct rocker_tlv *) ((char *) tlv + totlen);
682 static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
684 return remaining >= (int) ROCKER_TLV_HDRLEN &&
685 tlv->len >= ROCKER_TLV_HDRLEN &&
686 tlv->len <= remaining;
689 #define rocker_tlv_for_each(pos, head, len, rem) \
690 for (pos = head, rem = len; \
691 rocker_tlv_ok(pos, rem); \
692 pos = rocker_tlv_next(pos, &(rem)))
694 #define rocker_tlv_for_each_nested(pos, tlv, rem) \
695 rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
696 rocker_tlv_len(tlv), rem)
698 static int rocker_tlv_attr_size(int payload)
700 return ROCKER_TLV_HDRLEN + payload;
703 static int rocker_tlv_total_size(int payload)
705 return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
708 static int rocker_tlv_padlen(int payload)
710 return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
713 static int rocker_tlv_type(const struct rocker_tlv *tlv)
718 static void *rocker_tlv_data(const struct rocker_tlv *tlv)
720 return (char *) tlv + ROCKER_TLV_HDRLEN;
723 static int rocker_tlv_len(const struct rocker_tlv *tlv)
725 return tlv->len - ROCKER_TLV_HDRLEN;
728 static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
730 return *(u8 *) rocker_tlv_data(tlv);
733 static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
735 return *(u16 *) rocker_tlv_data(tlv);
738 static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
740 return *(__be16 *) rocker_tlv_data(tlv);
743 static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
745 return *(u32 *) rocker_tlv_data(tlv);
748 static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
750 return *(u64 *) rocker_tlv_data(tlv);
753 static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
754 const char *buf, int buf_len)
756 const struct rocker_tlv *tlv;
757 const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
760 memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
762 rocker_tlv_for_each(tlv, head, buf_len, rem) {
763 u32 type = rocker_tlv_type(tlv);
765 if (type > 0 && type <= maxtype)
770 static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
771 const struct rocker_tlv *tlv)
773 rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
774 rocker_tlv_len(tlv));
777 static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
778 const struct rocker_desc_info *desc_info)
780 rocker_tlv_parse(tb, maxtype, desc_info->data,
781 desc_info->desc->tlv_size);
784 static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
786 return (struct rocker_tlv *) ((char *) desc_info->data +
787 desc_info->tlv_size);
790 static int rocker_tlv_put(struct rocker_desc_info *desc_info,
791 int attrtype, int attrlen, const void *data)
793 int tail_room = desc_info->data_size - desc_info->tlv_size;
794 int total_size = rocker_tlv_total_size(attrlen);
795 struct rocker_tlv *tlv;
797 if (unlikely(tail_room < total_size))
800 tlv = rocker_tlv_start(desc_info);
801 desc_info->tlv_size += total_size;
802 tlv->type = attrtype;
803 tlv->len = rocker_tlv_attr_size(attrlen);
804 memcpy(rocker_tlv_data(tlv), data, attrlen);
805 memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
809 static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
810 int attrtype, u8 value)
812 return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
815 static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
816 int attrtype, u16 value)
818 return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
821 static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
822 int attrtype, __be16 value)
824 return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
827 static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
828 int attrtype, u32 value)
830 return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
833 static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
834 int attrtype, __be32 value)
836 return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
839 static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
840 int attrtype, u64 value)
842 return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
845 static struct rocker_tlv *
846 rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
848 struct rocker_tlv *start = rocker_tlv_start(desc_info);
850 if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
856 static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
857 struct rocker_tlv *start)
859 start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
862 static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
863 const struct rocker_tlv *start)
865 desc_info->tlv_size = (const char *) start - desc_info->data;
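/* Putting the TLV helpers together (a sketch modelled on the command
 * preparation callbacks later in this file): a command is built as a
 * flat type TLV followed by a nested ROCKER_TLV_CMD_INFO block:
 *
 *	struct rocker_tlv *cmd_info;
 *
 *	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
 *			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
 *		return -EMSGSIZE;
 *	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	if (!cmd_info)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
 *			       rocker_port->pport))
 *		return -EMSGSIZE;
 *	rocker_tlv_nest_end(desc_info, cmd_info);
 */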
868 /******************************************
869 * DMA ring and descriptor manipulation
870 ******************************************/
872 static u32 __pos_inc(u32 pos, size_t limit)
874 return ++pos == limit ? 0 : pos;
877 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
879 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
894 case -ROCKER_EMSGSIZE:
896 case -ROCKER_ENOTSUP:
898 case -ROCKER_ENOBUFS:
905 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
907 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
910 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
912 u32 comp_err = desc_info->desc->comp_err;
914 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
917 static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
919 return (void *)(uintptr_t)desc_info->desc->cookie;
922 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
925 desc_info->desc->cookie = (uintptr_t) ptr;
928 static struct rocker_desc_info *
929 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
931 struct rocker_desc_info *desc_info;
932 u32 head = __pos_inc(info->head, info->size);
934 desc_info = &info->desc_info[info->head];
935 if (head == info->tail)
936 return NULL; /* ring full */
937 desc_info->tlv_size = 0;
941 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
943 desc_info->desc->buf_size = desc_info->data_size;
944 desc_info->desc->tlv_size = desc_info->tlv_size;
947 static void rocker_desc_head_set(const struct rocker *rocker,
948 struct rocker_dma_ring_info *info,
949 const struct rocker_desc_info *desc_info)
951 u32 head = __pos_inc(info->head, info->size);
953 BUG_ON(head == info->tail);
954 rocker_desc_commit(desc_info);
956 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
959 static struct rocker_desc_info *
960 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
962 struct rocker_desc_info *desc_info;
964 if (info->tail == info->head)
965 return NULL; /* nothing to be done between head and tail */
966 desc_info = &info->desc_info[info->tail];
967 if (!rocker_desc_gen(desc_info))
968 return NULL; /* gen bit not set, desc is not ready yet */
969 info->tail = __pos_inc(info->tail, info->size);
970 desc_info->tlv_size = desc_info->desc->tlv_size;
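/* Ring usage sketch (as used by the cmd and event paths below): the
 * producer grabs the slot at head with rocker_desc_head_get(), fills in
 * its TLVs, and posts it with rocker_desc_head_set(), which also bumps
 * the hardware head register.  Completions are consumed from tail with
 * rocker_desc_tail_get(), which returns NULL until the hardware has set
 * the descriptor's GEN bit.
 */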
974 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
975 const struct rocker_dma_ring_info *info,
979 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
982 static unsigned long rocker_dma_ring_size_fix(size_t size)
984 return max(ROCKER_DMA_SIZE_MIN,
985 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
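/* For example, a requested size of 100 is rounded up to the next power
 * of two (128) and then clamped to [ROCKER_DMA_SIZE_MIN,
 * ROCKER_DMA_SIZE_MAX]; rocker_dma_ring_create() below BUG()s if handed
 * a size that does not already satisfy this.
 */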
988 static int rocker_dma_ring_create(const struct rocker *rocker,
991 struct rocker_dma_ring_info *info)
995 BUG_ON(size != rocker_dma_ring_size_fix(size));
1000 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
1002 if (!info->desc_info)
1005 info->desc = pci_alloc_consistent(rocker->pdev,
1006 info->size * sizeof(*info->desc),
1009 kfree(info->desc_info);
1013 for (i = 0; i < info->size; i++)
1014 info->desc_info[i].desc = &info->desc[i];
1016 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
1017 ROCKER_DMA_DESC_CTRL_RESET);
1018 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
1019 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
1024 static void rocker_dma_ring_destroy(const struct rocker *rocker,
1025 const struct rocker_dma_ring_info *info)
1027 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
1029 pci_free_consistent(rocker->pdev,
1030 info->size * sizeof(struct rocker_desc),
1031 info->desc, info->mapaddr);
1032 kfree(info->desc_info);
1035 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
1036 struct rocker_dma_ring_info *info)
1040 BUG_ON(info->head || info->tail);
1042 /* When the ring is a consumer ring, we need to advance the head for
1043 * each desc. That tells the hw that the desc is ready for it to use.
1045 for (i = 0; i < info->size - 1; i++)
1046 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
1047 rocker_desc_commit(&info->desc_info[i]);
1050 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
1051 const struct rocker_dma_ring_info *info,
1052 int direction, size_t buf_size)
1054 struct pci_dev *pdev = rocker->pdev;
1058 for (i = 0; i < info->size; i++) {
1059 struct rocker_desc_info *desc_info = &info->desc_info[i];
1060 struct rocker_desc *desc = &info->desc[i];
1061 dma_addr_t dma_handle;
1064 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
1070 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
1071 if (pci_dma_mapping_error(pdev, dma_handle)) {
1077 desc_info->data = buf;
1078 desc_info->data_size = buf_size;
1079 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
1081 desc->buf_addr = dma_handle;
1082 desc->buf_size = buf_size;
1087 for (i--; i >= 0; i--) {
1088 const struct rocker_desc_info *desc_info = &info->desc_info[i];
1090 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1091 desc_info->data_size, direction);
1092 kfree(desc_info->data);
1097 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
1098 const struct rocker_dma_ring_info *info,
1101 struct pci_dev *pdev = rocker->pdev;
1104 for (i = 0; i < info->size; i++) {
1105 const struct rocker_desc_info *desc_info = &info->desc_info[i];
1106 struct rocker_desc *desc = &info->desc[i];
1110 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1111 desc_info->data_size, direction);
1112 kfree(desc_info->data);
1116 static int rocker_dma_rings_init(struct rocker *rocker)
1118 const struct pci_dev *pdev = rocker->pdev;
1121 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1122 ROCKER_DMA_CMD_DEFAULT_SIZE,
1125 dev_err(&pdev->dev, "failed to create command dma ring\n");
1129 spin_lock_init(&rocker->cmd_ring_lock);
1131 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1132 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1134 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1135 goto err_dma_cmd_ring_bufs_alloc;
1138 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1139 ROCKER_DMA_EVENT_DEFAULT_SIZE,
1140 &rocker->event_ring);
1142 dev_err(&pdev->dev, "failed to create event dma ring\n");
1143 goto err_dma_event_ring_create;
1146 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1147 PCI_DMA_FROMDEVICE, PAGE_SIZE);
1149 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1150 goto err_dma_event_ring_bufs_alloc;
1152 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1155 err_dma_event_ring_bufs_alloc:
1156 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1157 err_dma_event_ring_create:
1158 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1159 PCI_DMA_BIDIRECTIONAL);
1160 err_dma_cmd_ring_bufs_alloc:
1161 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1165 static void rocker_dma_rings_fini(struct rocker *rocker)
1167 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1168 PCI_DMA_BIDIRECTIONAL);
1169 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1170 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1171 PCI_DMA_BIDIRECTIONAL);
1172 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1175 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
1176 struct rocker_desc_info *desc_info,
1177 struct sk_buff *skb, size_t buf_len)
1179 const struct rocker *rocker = rocker_port->rocker;
1180 struct pci_dev *pdev = rocker->pdev;
1181 dma_addr_t dma_handle;
1183 dma_handle = pci_map_single(pdev, skb->data, buf_len,
1184 PCI_DMA_FROMDEVICE);
1185 if (pci_dma_mapping_error(pdev, dma_handle))
1187 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1188 goto tlv_put_failure;
1189 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1190 goto tlv_put_failure;
1194 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1195 desc_info->tlv_size = 0;
1199 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
1201 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
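/* For example, with the default MTU of 1500 this is 1500 + 14 (ETH_HLEN)
 * + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes per receive buffer.
 */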
1204 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
1205 struct rocker_desc_info *desc_info)
1207 struct net_device *dev = rocker_port->dev;
1208 struct sk_buff *skb;
1209 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1212 /* Ensure that hw will see tlv_size zero in case of an error.
1213 * That tells hw to use another descriptor.
1215 rocker_desc_cookie_ptr_set(desc_info, NULL);
1216 desc_info->tlv_size = 0;
1218 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1221 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
1223 dev_kfree_skb_any(skb);
1226 rocker_desc_cookie_ptr_set(desc_info, skb);
1230 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1231 const struct rocker_tlv **attrs)
1233 struct pci_dev *pdev = rocker->pdev;
1234 dma_addr_t dma_handle;
1237 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1238 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1240 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1241 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1242 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1245 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1246 const struct rocker_desc_info *desc_info)
1248 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1249 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1253 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1254 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1255 dev_kfree_skb_any(skb);
1258 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
1260 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1261 const struct rocker *rocker = rocker_port->rocker;
1265 for (i = 0; i < rx_ring->size; i++) {
1266 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
1267 &rx_ring->desc_info[i]);
1274 for (i--; i >= 0; i--)
1275 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1279 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1281 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1282 const struct rocker *rocker = rocker_port->rocker;
1285 for (i = 0; i < rx_ring->size; i++)
1286 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1289 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1291 struct rocker *rocker = rocker_port->rocker;
1294 err = rocker_dma_ring_create(rocker,
1295 ROCKER_DMA_TX(rocker_port->port_number),
1296 ROCKER_DMA_TX_DEFAULT_SIZE,
1297 &rocker_port->tx_ring);
1299 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1303 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1305 ROCKER_DMA_TX_DESC_SIZE);
1307 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1308 goto err_dma_tx_ring_bufs_alloc;
1311 err = rocker_dma_ring_create(rocker,
1312 ROCKER_DMA_RX(rocker_port->port_number),
1313 ROCKER_DMA_RX_DEFAULT_SIZE,
1314 &rocker_port->rx_ring);
1316 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1317 goto err_dma_rx_ring_create;
1320 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1321 PCI_DMA_BIDIRECTIONAL,
1322 ROCKER_DMA_RX_DESC_SIZE);
1324 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1325 goto err_dma_rx_ring_bufs_alloc;
1328 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1330 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1331 goto err_dma_rx_ring_skbs_alloc;
1333 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1337 err_dma_rx_ring_skbs_alloc:
1338 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1339 PCI_DMA_BIDIRECTIONAL);
1340 err_dma_rx_ring_bufs_alloc:
1341 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1342 err_dma_rx_ring_create:
1343 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1345 err_dma_tx_ring_bufs_alloc:
1346 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1350 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1352 struct rocker *rocker = rocker_port->rocker;
1354 rocker_dma_rx_ring_skbs_free(rocker_port);
1355 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1356 PCI_DMA_BIDIRECTIONAL);
1357 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1358 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1360 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1363 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1366 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1369 val |= 1ULL << rocker_port->pport;
1371 val &= ~(1ULL << rocker_port->pport);
1372 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1375 /********************************
1376 * Interrupt handler and helpers
1377 ********************************/
1379 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1381 struct rocker *rocker = dev_id;
1382 const struct rocker_desc_info *desc_info;
1383 struct rocker_wait *wait;
1386 spin_lock(&rocker->cmd_ring_lock);
1387 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1388 wait = rocker_desc_cookie_ptr_get(desc_info);
1389 rocker_wait_wake_up(wait);
1392 spin_unlock(&rocker->cmd_ring_lock);
1393 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1398 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1400 netif_carrier_on(rocker_port->dev);
1401 netdev_info(rocker_port->dev, "Link is up\n");
1404 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1406 netif_carrier_off(rocker_port->dev);
1407 netdev_info(rocker_port->dev, "Link is down\n");
1410 static int rocker_event_link_change(const struct rocker *rocker,
1411 const struct rocker_tlv *info)
1413 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1414 unsigned int port_number;
1416 struct rocker_port *rocker_port;
1418 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1419 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1420 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1423 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1424 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1426 if (port_number >= rocker->port_count)
1429 rocker_port = rocker->ports[port_number];
1430 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1432 rocker_port_link_up(rocker_port);
1434 rocker_port_link_down(rocker_port);
1440 #define ROCKER_OP_FLAG_REMOVE BIT(0)
1441 #define ROCKER_OP_FLAG_LEARNED BIT(1)
1442 #define ROCKER_OP_FLAG_REFRESH BIT(2)
1444 static int rocker_port_fdb(struct rocker_port *rocker_port,
1445 enum switchdev_trans trans,
1446 const unsigned char *addr,
1447 __be16 vlan_id, int flags);
1449 struct rocker_mac_vlan_seen_work {
1450 struct work_struct work;
1451 struct rocker_port *rocker_port;
1453 unsigned char addr[ETH_ALEN];
1457 static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
1459 const struct rocker_mac_vlan_seen_work *sw =
1460 container_of(work, struct rocker_mac_vlan_seen_work, work);
1463 rocker_port_fdb(sw->rocker_port, SWITCHDEV_TRANS_NONE,
1464 sw->addr, sw->vlan_id, sw->flags);
1470 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
1471 const struct rocker_tlv *info)
1473 struct rocker_mac_vlan_seen_work *sw;
1474 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1475 unsigned int port_number;
1476 struct rocker_port *rocker_port;
1477 const unsigned char *addr;
1478 int flags = ROCKER_OP_FLAG_LEARNED;
1481 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1482 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1483 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1484 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1487 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1488 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1489 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1491 if (port_number >= rocker->port_count)
1494 rocker_port = rocker->ports[port_number];
1496 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1497 rocker_port->stp_state != BR_STATE_FORWARDING)
1500 sw = kmalloc(sizeof(*sw), GFP_ATOMIC);
1504 INIT_WORK(&sw->work, rocker_event_mac_vlan_seen_work);
1506 sw->rocker_port = rocker_port;
1508 ether_addr_copy(sw->addr, addr);
1509 sw->vlan_id = vlan_id;
1511 schedule_work(&sw->work);
1516 static int rocker_event_process(const struct rocker *rocker,
1517 const struct rocker_desc_info *desc_info)
1519 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1520 const struct rocker_tlv *info;
1523 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1524 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1525 !attrs[ROCKER_TLV_EVENT_INFO])
1528 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1529 info = attrs[ROCKER_TLV_EVENT_INFO];
1532 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1533 return rocker_event_link_change(rocker, info);
1534 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1535 return rocker_event_mac_vlan_seen(rocker, info);
1541 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1543 struct rocker *rocker = dev_id;
1544 const struct pci_dev *pdev = rocker->pdev;
1545 const struct rocker_desc_info *desc_info;
1549 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1550 err = rocker_desc_err(desc_info);
1552 dev_err(&pdev->dev, "event desc received with err %d\n",
1555 err = rocker_event_process(rocker, desc_info);
1557 dev_err(&pdev->dev, "event processing failed with err %d\n",
1560 rocker_desc_gen_clear(desc_info);
1561 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1564 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1569 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1571 struct rocker_port *rocker_port = dev_id;
1573 napi_schedule(&rocker_port->napi_tx);
1577 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1579 struct rocker_port *rocker_port = dev_id;
1581 napi_schedule(&rocker_port->napi_rx);
1585 /********************
 * Command interface
1587 ********************/
1589 typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
1590 struct rocker_desc_info *desc_info,
1593 typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
1594 const struct rocker_desc_info *desc_info,
1597 static int rocker_cmd_exec(struct rocker_port *rocker_port,
1598 enum switchdev_trans trans,
1599 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1600 rocker_cmd_proc_cb_t process, void *process_priv)
1602 struct rocker *rocker = rocker_port->rocker;
1603 struct rocker_desc_info *desc_info;
1604 struct rocker_wait *wait;
1605 unsigned long flags;
1608 wait = rocker_wait_create(rocker_port, trans);
1612 spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
1614 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1616 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1621 err = prepare(rocker_port, desc_info, prepare_priv);
1623 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1627 rocker_desc_cookie_ptr_set(desc_info, wait);
1629 if (trans != SWITCHDEV_TRANS_PREPARE)
1630 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1632 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1634 if (trans != SWITCHDEV_TRANS_PREPARE)
1635 if (!rocker_wait_event_timeout(wait, HZ / 10))
1638 err = rocker_desc_err(desc_info);
1643 err = process(rocker_port, desc_info, process_priv);
1645 rocker_desc_gen_clear(desc_info);
1647 rocker_wait_destroy(trans, wait);
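/* A typical caller (see the ethtool and macaddr wrappers below) pairs a
 * prepare callback that builds the command TLVs with an optional process
 * callback that parses the completed response, e.g.:
 *
 *	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 */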
1652 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1653 struct rocker_desc_info *desc_info,
1656 struct rocker_tlv *cmd_info;
1658 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1659 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1661 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1664 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1665 rocker_port->pport))
1667 rocker_tlv_nest_end(desc_info, cmd_info);
1672 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1673 const struct rocker_desc_info *desc_info,
1676 struct ethtool_cmd *ecmd = priv;
1677 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1678 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1683 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1684 if (!attrs[ROCKER_TLV_CMD_INFO])
1687 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1688 attrs[ROCKER_TLV_CMD_INFO]);
1689 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1690 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1691 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1694 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1695 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1696 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1698 ecmd->transceiver = XCVR_INTERNAL;
1699 ecmd->supported = SUPPORTED_TP;
1700 ecmd->phy_address = 0xff;
1701 ecmd->port = PORT_TP;
1702 ethtool_cmd_speed_set(ecmd, speed);
1703 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1704 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1710 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1711 const struct rocker_desc_info *desc_info,
1714 unsigned char *macaddr = priv;
1715 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1716 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1717 const struct rocker_tlv *attr;
1719 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1720 if (!attrs[ROCKER_TLV_CMD_INFO])
1723 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1724 attrs[ROCKER_TLV_CMD_INFO]);
1725 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1729 if (rocker_tlv_len(attr) != ETH_ALEN)
1732 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1742 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1743 const struct rocker_desc_info *desc_info,
1746 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1747 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1748 struct port_name *name = priv;
1749 const struct rocker_tlv *attr;
1753 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1754 if (!attrs[ROCKER_TLV_CMD_INFO])
1757 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1758 attrs[ROCKER_TLV_CMD_INFO]);
1759 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1763 len = min_t(size_t, rocker_tlv_len(attr), name->len);
1764 str = rocker_tlv_data(attr);
1766 /* make sure name only contains alphanumeric characters */
1767 for (i = j = 0; i < len; ++i) {
1768 if (isalnum(str[i])) {
1769 name->buf[j] = str[i];
1777 name->buf[j] = '\0';
1783 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1784 struct rocker_desc_info *desc_info,
1787 struct ethtool_cmd *ecmd = priv;
1788 struct rocker_tlv *cmd_info;
1790 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1791 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1793 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1796 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1797 rocker_port->pport))
1799 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1800 ethtool_cmd_speed(ecmd)))
1802 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1805 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1808 rocker_tlv_nest_end(desc_info, cmd_info);
1813 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1814 struct rocker_desc_info *desc_info,
1817 const unsigned char *macaddr = priv;
1818 struct rocker_tlv *cmd_info;
1820 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1821 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1823 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1826 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1827 rocker_port->pport))
1829 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1832 rocker_tlv_nest_end(desc_info, cmd_info);
1837 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1838 struct rocker_desc_info *desc_info,
1841 struct rocker_tlv *cmd_info;
1843 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1844 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1846 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1849 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1850 rocker_port->pport))
1852 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1853 !!(rocker_port->brport_flags & BR_LEARNING)))
1855 rocker_tlv_nest_end(desc_info, cmd_info);
1859 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1860 struct ethtool_cmd *ecmd)
1862 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
1863 rocker_cmd_get_port_settings_prep, NULL,
1864 rocker_cmd_get_port_settings_ethtool_proc,
1868 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1869 unsigned char *macaddr)
1871 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
1872 rocker_cmd_get_port_settings_prep, NULL,
1873 rocker_cmd_get_port_settings_macaddr_proc,
1877 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1878 struct ethtool_cmd *ecmd)
1880 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
1881 rocker_cmd_set_port_settings_ethtool_prep,
1885 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1886 unsigned char *macaddr)
1888 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
1889 rocker_cmd_set_port_settings_macaddr_prep,
1890 macaddr, NULL, NULL);
1893 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1894 enum switchdev_trans trans)
1896 return rocker_cmd_exec(rocker_port, trans,
1897 rocker_cmd_set_port_learning_prep,
1902 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1903 const struct rocker_flow_tbl_entry *entry)
1905 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1906 entry->key.ig_port.in_pport))
1908 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1909 entry->key.ig_port.in_pport_mask))
1911 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1912 entry->key.ig_port.goto_tbl))
1919 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1920 const struct rocker_flow_tbl_entry *entry)
1922 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1923 entry->key.vlan.in_pport))
1925 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1926 entry->key.vlan.vlan_id))
1928 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1929 entry->key.vlan.vlan_id_mask))
1931 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1932 entry->key.vlan.goto_tbl))
1934 if (entry->key.vlan.untagged &&
1935 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1936 entry->key.vlan.new_vlan_id))
1943 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1944 const struct rocker_flow_tbl_entry *entry)
1946 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1947 entry->key.term_mac.in_pport))
1949 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1950 entry->key.term_mac.in_pport_mask))
1952 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1953 entry->key.term_mac.eth_type))
1955 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1956 ETH_ALEN, entry->key.term_mac.eth_dst))
1958 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1959 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1961 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1962 entry->key.term_mac.vlan_id))
1964 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1965 entry->key.term_mac.vlan_id_mask))
1967 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1968 entry->key.term_mac.goto_tbl))
1970 if (entry->key.term_mac.copy_to_cpu &&
1971 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1972 entry->key.term_mac.copy_to_cpu))
1979 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1980 const struct rocker_flow_tbl_entry *entry)
1982 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1983 entry->key.ucast_routing.eth_type))
1985 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1986 entry->key.ucast_routing.dst4))
1988 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1989 entry->key.ucast_routing.dst4_mask))
1991 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1992 entry->key.ucast_routing.goto_tbl))
1994 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1995 entry->key.ucast_routing.group_id))
2002 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2003 const struct rocker_flow_tbl_entry *entry)
2005 if (entry->key.bridge.has_eth_dst &&
2006 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2007 ETH_ALEN, entry->key.bridge.eth_dst))
2009 if (entry->key.bridge.has_eth_dst_mask &&
2010 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2011 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2013 if (entry->key.bridge.vlan_id &&
2014 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2015 entry->key.bridge.vlan_id))
2017 if (entry->key.bridge.tunnel_id &&
2018 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2019 entry->key.bridge.tunnel_id))
2021 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2022 entry->key.bridge.goto_tbl))
2024 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2025 entry->key.bridge.group_id))
2027 if (entry->key.bridge.copy_to_cpu &&
2028 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2029 entry->key.bridge.copy_to_cpu))
2036 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2037 const struct rocker_flow_tbl_entry *entry)
2039 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2040 entry->key.acl.in_pport))
2042 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2043 entry->key.acl.in_pport_mask))
2045 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2046 ETH_ALEN, entry->key.acl.eth_src))
2048 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2049 ETH_ALEN, entry->key.acl.eth_src_mask))
2051 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2052 ETH_ALEN, entry->key.acl.eth_dst))
2054 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2055 ETH_ALEN, entry->key.acl.eth_dst_mask))
2057 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2058 entry->key.acl.eth_type))
2060 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2061 entry->key.acl.vlan_id))
2063 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2064 entry->key.acl.vlan_id_mask))
2067 switch (ntohs(entry->key.acl.eth_type)) {
2070 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2071 entry->key.acl.ip_proto))
2073 if (rocker_tlv_put_u8(desc_info,
2074 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2075 entry->key.acl.ip_proto_mask))
2077 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2078 entry->key.acl.ip_tos & 0x3f))
2080 if (rocker_tlv_put_u8(desc_info,
2081 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2082 entry->key.acl.ip_tos_mask & 0x3f))
2084 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2085 (entry->key.acl.ip_tos & 0xc0) >> 6))
2087 if (rocker_tlv_put_u8(desc_info,
2088 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2089 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2094 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2095 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2096 entry->key.acl.group_id))
2102 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2103 struct rocker_desc_info *desc_info,
2106 const struct rocker_flow_tbl_entry *entry = priv;
2107 struct rocker_tlv *cmd_info;
2110 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2112 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2115 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2118 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2119 entry->key.priority))
2121 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2123 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2127 switch (entry->key.tbl_id) {
2128 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2129 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2131 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2132 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2134 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2135 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2137 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2138 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2140 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2141 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2143 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2144 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2154 rocker_tlv_nest_end(desc_info, cmd_info);
2159 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2160 struct rocker_desc_info *desc_info,
2163 const struct rocker_flow_tbl_entry *entry = priv;
2164 struct rocker_tlv *cmd_info;
2166 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2168 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2171 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2174 rocker_tlv_nest_end(desc_info, cmd_info);
2180 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2181 struct rocker_group_tbl_entry *entry)
2183 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2184 ROCKER_GROUP_PORT_GET(entry->group_id)))
2186 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2187 entry->l2_interface.pop_vlan))
2194 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2195 const struct rocker_group_tbl_entry *entry)
2197 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2198 entry->l2_rewrite.group_id))
2200 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2201 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2202 ETH_ALEN, entry->l2_rewrite.eth_src))
2204 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2205 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2206 ETH_ALEN, entry->l2_rewrite.eth_dst))
2208 if (entry->l2_rewrite.vlan_id &&
2209 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2210 entry->l2_rewrite.vlan_id))
2217 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2218 const struct rocker_group_tbl_entry *entry)
2221 struct rocker_tlv *group_ids;
2223 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2224 entry->group_count))
2227 group_ids = rocker_tlv_nest_start(desc_info,
2228 ROCKER_TLV_OF_DPA_GROUP_IDS);
2232 for (i = 0; i < entry->group_count; i++)
2233 /* Note TLV array is 1-based */
2234 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2237 rocker_tlv_nest_end(desc_info, group_ids);
2243 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2244 const struct rocker_group_tbl_entry *entry)
2246 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2247 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2248 ETH_ALEN, entry->l3_unicast.eth_src))
2250 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2251 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2252 ETH_ALEN, entry->l3_unicast.eth_dst))
2254 if (entry->l3_unicast.vlan_id &&
2255 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2256 entry->l3_unicast.vlan_id))
2258 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2259 entry->l3_unicast.ttl_check))
2261 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2262 entry->l3_unicast.group_id))
2268 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2269 struct rocker_desc_info *desc_info,
2272 struct rocker_group_tbl_entry *entry = priv;
2273 struct rocker_tlv *cmd_info;
2276 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2278 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2282 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2286 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2287 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2288 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2290 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2291 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2293 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2294 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2295 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2297 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2298 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2308 rocker_tlv_nest_end(desc_info, cmd_info);
2313 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2314 struct rocker_desc_info *desc_info,
2317 const struct rocker_group_tbl_entry *entry = priv;
2318 struct rocker_tlv *cmd_info;
2320 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2322 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2325 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2328 rocker_tlv_nest_end(desc_info, cmd_info);
2333 /***************************************************
2334 * Flow, group, FDB, internal VLAN and neigh tables
2335 ***************************************************/
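/* These hash tables are the driver's software shadow of what has been
 * programmed into the device: flow entries are hashed by a CRC32 of
 * their match key, group entries by group_id, FDB entries by a CRC32
 * of {pport, MAC, VLAN}, internal VLANs by ifindex, and neighbours by
 * IP address.  The shadow lets an add of an already-known entry be
 * turned into a modify, and lets a delete recover the cookie or ID the
 * device was originally given.
 */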
2337 static int rocker_init_tbls(struct rocker *rocker)
2339 hash_init(rocker->flow_tbl);
2340 spin_lock_init(&rocker->flow_tbl_lock);
2342 hash_init(rocker->group_tbl);
2343 spin_lock_init(&rocker->group_tbl_lock);
2345 hash_init(rocker->fdb_tbl);
2346 spin_lock_init(&rocker->fdb_tbl_lock);
2348 hash_init(rocker->internal_vlan_tbl);
2349 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2351 hash_init(rocker->neigh_tbl);
2352 spin_lock_init(&rocker->neigh_tbl_lock);
2357 static void rocker_free_tbls(struct rocker *rocker)
2359 unsigned long flags;
2360 struct rocker_flow_tbl_entry *flow_entry;
2361 struct rocker_group_tbl_entry *group_entry;
2362 struct rocker_fdb_tbl_entry *fdb_entry;
2363 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2364 struct rocker_neigh_tbl_entry *neigh_entry;
2365 struct hlist_node *tmp;
2368 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2369 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2370 hash_del(&flow_entry->entry);
2371 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2373 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2374 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2375 hash_del(&group_entry->entry);
2376 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2378 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2379 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2380 hash_del(&fdb_entry->entry);
2381 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2383 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2384 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2385 tmp, internal_vlan_entry, entry)
2386 hash_del(&internal_vlan_entry->entry);
2387 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2389 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2390 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2391 hash_del(&neigh_entry->entry);
2392 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2395 static struct rocker_flow_tbl_entry *
2396 rocker_flow_tbl_find(const struct rocker *rocker,
2397 const struct rocker_flow_tbl_entry *match)
2399 struct rocker_flow_tbl_entry *found;
2400 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2402 hash_for_each_possible(rocker->flow_tbl, found,
2403 entry, match->key_crc32) {
2404 if (memcmp(&found->key, &match->key, key_len) == 0)
2411 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2412 enum switchdev_trans trans,
2413 struct rocker_flow_tbl_entry *match)
2415 struct rocker *rocker = rocker_port->rocker;
2416 struct rocker_flow_tbl_entry *found;
2417 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2418 unsigned long flags;
2420 match->key_crc32 = crc32(~0, &match->key, key_len);
2422 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2424 found = rocker_flow_tbl_find(rocker, match);
2427 match->cookie = found->cookie;
2428 if (trans != SWITCHDEV_TRANS_PREPARE)
2429 hash_del(&found->entry);
2430 rocker_port_kfree(trans, found);
2432 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2435 found->cookie = rocker->flow_tbl_next_cookie++;
2436 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2439 if (trans != SWITCHDEV_TRANS_PREPARE)
2440 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2442 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2444 return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add,
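/* Add semantics for the flow shadow table: if a matching entry already
 * exists its cookie is reused and the command is sent as OF_DPA_FLOW_MOD,
 * otherwise a fresh cookie is allocated and OF_DPA_FLOW_ADD is sent.
 * During a switchdev PREPARE transaction the hash table itself is left
 * untouched, and allocations go through rocker_port_kzalloc()/
 * rocker_port_kfree() so an aborted transaction can release them again
 * (see rocker_port_trans_abort()).
 */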
2448 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2449 enum switchdev_trans trans,
2450 struct rocker_flow_tbl_entry *match)
2452 struct rocker *rocker = rocker_port->rocker;
2453 struct rocker_flow_tbl_entry *found;
2454 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2455 unsigned long flags;
2458 match->key_crc32 = crc32(~0, &match->key, key_len);
2460 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2462 found = rocker_flow_tbl_find(rocker, match);
2465 if (trans != SWITCHDEV_TRANS_PREPARE)
2466 hash_del(&found->entry);
2467 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2470 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2472 rocker_port_kfree(trans, match);
2475 err = rocker_cmd_exec(rocker_port, trans,
2476 rocker_cmd_flow_tbl_del,
2478 rocker_port_kfree(trans, found);
2484 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2485 enum switchdev_trans trans, int flags,
2486 struct rocker_flow_tbl_entry *entry)
2488 if (flags & ROCKER_OP_FLAG_REMOVE)
2489 return rocker_flow_tbl_del(rocker_port, trans, entry);
2491 return rocker_flow_tbl_add(rocker_port, trans, entry);
2494 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2495 enum switchdev_trans trans, int flags,
2496 u32 in_pport, u32 in_pport_mask,
2497 enum rocker_of_dpa_table_id goto_tbl)
2499 struct rocker_flow_tbl_entry *entry;
2501 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2505 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2506 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2507 entry->key.ig_port.in_pport = in_pport;
2508 entry->key.ig_port.in_pport_mask = in_pport_mask;
2509 entry->key.ig_port.goto_tbl = goto_tbl;
2511 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2514 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2515 enum switchdev_trans trans, int flags,
2516 u32 in_pport, __be16 vlan_id,
2517 __be16 vlan_id_mask,
2518 enum rocker_of_dpa_table_id goto_tbl,
2519 bool untagged, __be16 new_vlan_id)
2521 struct rocker_flow_tbl_entry *entry;
2523 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2527 entry->key.priority = ROCKER_PRIORITY_VLAN;
2528 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2529 entry->key.vlan.in_pport = in_pport;
2530 entry->key.vlan.vlan_id = vlan_id;
2531 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2532 entry->key.vlan.goto_tbl = goto_tbl;
2534 entry->key.vlan.untagged = untagged;
2535 entry->key.vlan.new_vlan_id = new_vlan_id;
2537 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2540 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2541 enum switchdev_trans trans,
2542 u32 in_pport, u32 in_pport_mask,
2543 __be16 eth_type, const u8 *eth_dst,
2544 const u8 *eth_dst_mask, __be16 vlan_id,
2545 __be16 vlan_id_mask, bool copy_to_cpu,
2548 struct rocker_flow_tbl_entry *entry;
2550 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2554 if (is_multicast_ether_addr(eth_dst)) {
2555 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2556 entry->key.term_mac.goto_tbl =
2557 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2559 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2560 entry->key.term_mac.goto_tbl =
2561 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2564 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2565 entry->key.term_mac.in_pport = in_pport;
2566 entry->key.term_mac.in_pport_mask = in_pport_mask;
2567 entry->key.term_mac.eth_type = eth_type;
2568 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2569 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2570 entry->key.term_mac.vlan_id = vlan_id;
2571 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2572 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2574 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2577 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2578 enum switchdev_trans trans, int flags,
2579 const u8 *eth_dst, const u8 *eth_dst_mask,
2580 __be16 vlan_id, u32 tunnel_id,
2581 enum rocker_of_dpa_table_id goto_tbl,
2582 u32 group_id, bool copy_to_cpu)
2584 struct rocker_flow_tbl_entry *entry;
2586 bool vlan_bridging = !!vlan_id;
2587 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
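/* "dflt" marks a catch-all bridging entry: either no dst MAC at all, or
 * a dst MAC qualified by a mask.  "wild" additionally marks a mask that
 * is not all-ones, i.e. a true wildcard match.  Together with
 * vlan_bridging (VLAN vs. tenant/tunnel bridging) they select one of the
 * six bridging priorities below.
 */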
2590 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2594 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2597 entry->key.bridge.has_eth_dst = 1;
2598 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2601 entry->key.bridge.has_eth_dst_mask = 1;
2602 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2603 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2607 priority = ROCKER_PRIORITY_UNKNOWN;
2608 if (vlan_bridging && dflt && wild)
2609 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2610 else if (vlan_bridging && dflt && !wild)
2611 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2612 else if (vlan_bridging && !dflt)
2613 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2614 else if (!vlan_bridging && dflt && wild)
2615 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2616 else if (!vlan_bridging && dflt && !wild)
2617 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2618 else if (!vlan_bridging && !dflt)
2619 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2621 entry->key.priority = priority;
2622 entry->key.bridge.vlan_id = vlan_id;
2623 entry->key.bridge.tunnel_id = tunnel_id;
2624 entry->key.bridge.goto_tbl = goto_tbl;
2625 entry->key.bridge.group_id = group_id;
2626 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2628 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2631 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2632 enum switchdev_trans trans,
2633 __be16 eth_type, __be32 dst,
2634 __be32 dst_mask, u32 priority,
2635 enum rocker_of_dpa_table_id goto_tbl,
2636 u32 group_id, int flags)
2638 struct rocker_flow_tbl_entry *entry;
2640 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2644 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2645 entry->key.priority = priority;
2646 entry->key.ucast_routing.eth_type = eth_type;
2647 entry->key.ucast_routing.dst4 = dst;
2648 entry->key.ucast_routing.dst4_mask = dst_mask;
2649 entry->key.ucast_routing.goto_tbl = goto_tbl;
2650 entry->key.ucast_routing.group_id = group_id;
2651 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2652 ucast_routing.group_id);
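/* Hash and compare the key only up to (but not including) group_id: the
 * group is the action half of the entry, so a second route to the same
 * dst/mask must compare equal to the first and be issued as a modify
 * rather than creating a duplicate flow.
 */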
2654 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2657 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2658 enum switchdev_trans trans, int flags,
2659 u32 in_pport, u32 in_pport_mask,
2660 const u8 *eth_src, const u8 *eth_src_mask,
2661 const u8 *eth_dst, const u8 *eth_dst_mask,
2662 __be16 eth_type, __be16 vlan_id,
2663 __be16 vlan_id_mask, u8 ip_proto,
2664 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2668 struct rocker_flow_tbl_entry *entry;
2670 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2674 priority = ROCKER_PRIORITY_ACL_NORMAL;
2675 if (eth_dst && eth_dst_mask) {
2676 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2677 priority = ROCKER_PRIORITY_ACL_DFLT;
2678 else if (is_link_local_ether_addr(eth_dst))
2679 priority = ROCKER_PRIORITY_ACL_CTRL;
2682 entry->key.priority = priority;
2683 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2684 entry->key.acl.in_pport = in_pport;
2685 entry->key.acl.in_pport_mask = in_pport_mask;
2688 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2690 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2692 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2694 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2696 entry->key.acl.eth_type = eth_type;
2697 entry->key.acl.vlan_id = vlan_id;
2698 entry->key.acl.vlan_id_mask = vlan_id_mask;
2699 entry->key.acl.ip_proto = ip_proto;
2700 entry->key.acl.ip_proto_mask = ip_proto_mask;
2701 entry->key.acl.ip_tos = ip_tos;
2702 entry->key.acl.ip_tos_mask = ip_tos_mask;
2703 entry->key.acl.group_id = group_id;
2705 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2708 static struct rocker_group_tbl_entry *
2709 rocker_group_tbl_find(const struct rocker *rocker,
2710 const struct rocker_group_tbl_entry *match)
2712 struct rocker_group_tbl_entry *found;
2714 hash_for_each_possible(rocker->group_tbl, found,
2715 entry, match->group_id) {
2716 if (found->group_id == match->group_id)
2723 static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
2724 struct rocker_group_tbl_entry *entry)
2726 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2727 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2728 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2729 rocker_port_kfree(trans, entry->group_ids);
2734 rocker_port_kfree(trans, entry);
2737 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2738 enum switchdev_trans trans,
2739 struct rocker_group_tbl_entry *match)
2741 struct rocker *rocker = rocker_port->rocker;
2742 struct rocker_group_tbl_entry *found;
2743 unsigned long flags;
2745 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2747 found = rocker_group_tbl_find(rocker, match);
2750 if (trans != SWITCHDEV_TRANS_PREPARE)
2751 hash_del(&found->entry);
2752 rocker_group_tbl_entry_free(trans, found);
2754 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2757 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2760 if (trans != SWITCHDEV_TRANS_PREPARE)
2761 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2763 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2765 return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add,
2769 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2770 enum switchdev_trans trans,
2771 struct rocker_group_tbl_entry *match)
2773 struct rocker *rocker = rocker_port->rocker;
2774 struct rocker_group_tbl_entry *found;
2775 unsigned long flags;
2778 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2780 found = rocker_group_tbl_find(rocker, match);
2783 if (trans != SWITCHDEV_TRANS_PREPARE)
2784 hash_del(&found->entry);
2785 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2788 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2790 rocker_group_tbl_entry_free(trans, match);
2793 err = rocker_cmd_exec(rocker_port, trans,
2794 rocker_cmd_group_tbl_del,
2796 rocker_group_tbl_entry_free(trans, found);
2802 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2803 enum switchdev_trans trans, int flags,
2804 struct rocker_group_tbl_entry *entry)
2806 if (flags & ROCKER_OP_FLAG_REMOVE)
2807 return rocker_group_tbl_del(rocker_port, trans, entry);
2809 return rocker_group_tbl_add(rocker_port, trans, entry);
2812 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2813 enum switchdev_trans trans, int flags,
2814 __be16 vlan_id, u32 out_pport,
2817 struct rocker_group_tbl_entry *entry;
2819 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2823 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2824 entry->l2_interface.pop_vlan = pop_vlan;
2826 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
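/* For reference, a sketch of how an L2 interface group ID appears to be
 * packed, inferred from the ROCKER_GROUP_*() accessors used in this file
 * (group type in the top nibble, VLAN and physical port below it).  The
 * authoritative macros live in rocker.h; the field positions here are an
 * assumption and this helper is illustrative only, not used by the driver.
 */
static inline u32 rocker_group_l2_interface_sketch(__be16 vlan_id, u32 pport)
{
	/* [31:28] group type, [27:16] VLAN ID, [15:0] physical port (assumed) */
	return (ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE << 28) |
	       ((u32)(ntohs(vlan_id) & 0xfff) << 16) |
	       (pport & 0xffff);
}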
2829 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2830 enum switchdev_trans trans,
2831 int flags, u8 group_count,
2832 const u32 *group_ids, u32 group_id)
2834 struct rocker_group_tbl_entry *entry;
2836 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2840 entry->group_id = group_id;
2841 entry->group_count = group_count;
2843 entry->group_ids = rocker_port_kcalloc(rocker_port, trans, group_count,
2845 if (!entry->group_ids) {
2846 rocker_port_kfree(trans, entry);
2849 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2851 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2854 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2855 enum switchdev_trans trans, int flags,
2856 __be16 vlan_id, u8 group_count,
2857 const u32 *group_ids, u32 group_id)
2859 return rocker_group_l2_fan_out(rocker_port, trans, flags,
2860 group_count, group_ids,
2864 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2865 enum switchdev_trans trans, int flags,
2866 u32 index, const u8 *src_mac, const u8 *dst_mac,
2867 __be16 vlan_id, bool ttl_check, u32 pport)
2869 struct rocker_group_tbl_entry *entry;
2871 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2875 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2877 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2879 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2880 entry->l3_unicast.vlan_id = vlan_id;
2881 entry->l3_unicast.ttl_check = ttl_check;
2882 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2884 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
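/* Note the OF-DPA group chaining here: the L3 unicast group carries the
 * rewrite information (source/destination MAC, VLAN, TTL check) and
 * chains to the L2 interface group for (vlan_id, pport), which handles
 * the actual egress port.
 */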
2887 static struct rocker_neigh_tbl_entry *
2888 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2890 struct rocker_neigh_tbl_entry *found;
2892 hash_for_each_possible(rocker->neigh_tbl, found,
2893 entry, be32_to_cpu(ip_addr))
2894 if (found->ip_addr == ip_addr)
2900 static void _rocker_neigh_add(struct rocker *rocker,
2901 enum switchdev_trans trans,
2902 struct rocker_neigh_tbl_entry *entry)
2904 entry->index = rocker->neigh_tbl_next_index;
2905 if (trans == SWITCHDEV_TRANS_PREPARE)
2907 rocker->neigh_tbl_next_index++;
2909 hash_add(rocker->neigh_tbl, &entry->entry,
2910 be32_to_cpu(entry->ip_addr));
2913 static void _rocker_neigh_del(enum switchdev_trans trans,
2914 struct rocker_neigh_tbl_entry *entry)
2916 if (trans == SWITCHDEV_TRANS_PREPARE)
2918 if (--entry->ref_count == 0) {
2919 hash_del(&entry->entry);
2920 rocker_port_kfree(trans, entry);
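/* Neighbour shadow entries are reference counted: the /32 host route
 * installed for a resolved neighbour and any nexthops pointing at it
 * share one entry.  _rocker_neigh_del() only unhashes and frees the
 * entry once the last user drops its reference, and none of these
 * helpers touch shared state during a PREPARE transaction.
 */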
2924 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2925 enum switchdev_trans trans,
2926 const u8 *eth_dst, bool ttl_check)
2929 ether_addr_copy(entry->eth_dst, eth_dst);
2930 entry->ttl_check = ttl_check;
2931 } else if (trans != SWITCHDEV_TRANS_PREPARE) {
2932 entry->ref_count++;
2933 }
2936 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2937 enum switchdev_trans trans,
2938 int flags, __be32 ip_addr, const u8 *eth_dst)
2940 struct rocker *rocker = rocker_port->rocker;
2941 struct rocker_neigh_tbl_entry *entry;
2942 struct rocker_neigh_tbl_entry *found;
2943 unsigned long lock_flags;
2944 __be16 eth_type = htons(ETH_P_IP);
2945 enum rocker_of_dpa_table_id goto_tbl =
2946 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2949 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2954 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
2958 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2960 found = rocker_neigh_tbl_find(rocker, ip_addr);
2962 updating = found && adding;
2963 removing = found && !adding;
2964 adding = !found && adding;
2967 entry->ip_addr = ip_addr;
2968 entry->dev = rocker_port->dev;
2969 ether_addr_copy(entry->eth_dst, eth_dst);
2970 entry->ttl_check = true;
2971 _rocker_neigh_add(rocker, trans, entry);
2972 } else if (removing) {
2973 memcpy(entry, found, sizeof(*entry));
2974 _rocker_neigh_del(trans, found);
2975 } else if (updating) {
2976 _rocker_neigh_update(found, trans, eth_dst, true);
2977 memcpy(entry, found, sizeof(*entry));
2982 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2987 /* For each active neighbor, we have an L3 unicast group and
2988 * a /32 route to the neighbor, which uses the L3 unicast
2989 * group. The L3 unicast group can also be referred to by
2990 * other routes' nexthops.
2991 */
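/* For example (hypothetical addresses): resolving 10.0.0.2 on this port
 * installs an L3 unicast group ROCKER_GROUP_L3_UNICAST(index) carrying
 * the port MAC as source, the neighbour MAC as destination and the
 * port's internal VLAN, plus a 10.0.0.2/32 entry in the unicast routing
 * table whose action is that group.
 */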
2993 err = rocker_group_l3_unicast(rocker_port, trans, flags,
2995 rocker_port->dev->dev_addr,
2997 rocker_port->internal_vlan_id,
2999 rocker_port->pport);
3001 netdev_err(rocker_port->dev,
3002 "Error (%d) L3 unicast group index %d\n",
3007 if (adding || removing) {
3008 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3009 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3016 netdev_err(rocker_port->dev,
3017 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3018 err, &entry->ip_addr, group_id);
3023 rocker_port_kfree(trans, entry);
3028 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3029 enum switchdev_trans trans, __be32 ip_addr)
3031 struct net_device *dev = rocker_port->dev;
3032 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3036 n = neigh_create(&arp_tbl, &ip_addr, dev);
3041 /* If the neigh is already resolved, then go ahead and
3042 * install the entry, otherwise start the ARP process to
3043 * resolve the neigh.
3046 if (n->nud_state & NUD_VALID)
3047 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3050 neigh_event_send(n, NULL);
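/* If the neighbour was not yet valid, neigh_event_send() asks the stack
 * to resolve it; the hardware entry is presumably installed later, once
 * the resolved neighbour is reported back to the driver.
 */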
3056 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3057 enum switchdev_trans trans, int flags,
3058 __be32 ip_addr, u32 *index)
3060 struct rocker *rocker = rocker_port->rocker;
3061 struct rocker_neigh_tbl_entry *entry;
3062 struct rocker_neigh_tbl_entry *found;
3063 unsigned long lock_flags;
3064 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3067 bool resolved = true;
3070 entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
3074 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3076 found = rocker_neigh_tbl_find(rocker, ip_addr);
3078 *index = found->index;
3080 updating = found && adding;
3081 removing = found && !adding;
3082 adding = !found && adding;
3085 entry->ip_addr = ip_addr;
3086 entry->dev = rocker_port->dev;
3087 _rocker_neigh_add(rocker, trans, entry);
3088 *index = entry->index;
3090 } else if (removing) {
3091 _rocker_neigh_del(trans, found);
3092 } else if (updating) {
3093 _rocker_neigh_update(found, trans, NULL, false);
3094 resolved = !is_zero_ether_addr(found->eth_dst);
3099 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3102 rocker_port_kfree(trans, entry);
3107 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3110 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
3115 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3116 enum switchdev_trans trans,
3117 int flags, __be16 vlan_id)
3119 struct rocker_port *p;
3120 const struct rocker *rocker = rocker_port->rocker;
3121 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3127 group_ids = rocker_port_kcalloc(rocker_port, trans, rocker->port_count,
3132 /* Adjust the flood group for this VLAN. The flood group
3133 * references an L2 interface group for each port in this
3134 * VLAN.
3135 */
3137 for (i = 0; i < rocker->port_count; i++) {
3138 p = rocker->ports[i];
3141 if (!rocker_port_is_bridged(p))
3143 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3144 group_ids[group_count++] =
3145 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3149 /* If there are no bridged ports in this VLAN, we're done */
3150 if (group_count == 0)
3151 goto no_ports_in_vlan;
3153 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3154 group_count, group_ids, group_id);
3156 netdev_err(rocker_port->dev,
3157 "Error (%d) port VLAN l2 flood group\n", err);
3160 rocker_port_kfree(trans, group_ids);
3164 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3165 enum switchdev_trans trans, int flags,
3166 __be16 vlan_id, bool pop_vlan)
3168 const struct rocker *rocker = rocker_port->rocker;
3169 struct rocker_port *p;
3170 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3176 /* An L2 interface group for this port in this VLAN, but
3177 * only when port STP state is LEARNING|FORWARDING.
3180 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3181 rocker_port->stp_state == BR_STATE_FORWARDING) {
3182 out_pport = rocker_port->pport;
3183 err = rocker_group_l2_interface(rocker_port, trans, flags,
3184 vlan_id, out_pport, pop_vlan);
3186 netdev_err(rocker_port->dev,
3187 "Error (%d) port VLAN l2 group for pport %d\n",
3193 /* An L2 interface group for this VLAN to CPU port.
3194 * Add when first port joins this VLAN and destroy when
3195 * last port leaves this VLAN.
3198 for (i = 0; i < rocker->port_count; i++) {
3199 p = rocker->ports[i];
3200 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
3201 ref++;
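/* Only act on the transitions that matter for the CPU-facing group: when
 * adding, this port's bit is already set in its vlan_bitmap, so ref == 1
 * means it is the first port to join the VLAN; when removing, the bit is
 * already cleared, so ref == 0 means the last port just left.
 */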
3204 if ((!adding || ref != 1) && (adding || ref != 0))
3208 err = rocker_group_l2_interface(rocker_port, trans, flags,
3209 vlan_id, out_pport, pop_vlan);
3211 netdev_err(rocker_port->dev,
3212 "Error (%d) port VLAN l2 group for CPU port\n", err);
3219 static struct rocker_ctrl {
3221 const u8 *eth_dst_mask;
3227 } rocker_ctrls[] = {
3228 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3229 /* pass link local multicast pkts up to CPU for filtering */
3231 .eth_dst_mask = ll_mask,
3234 [ROCKER_CTRL_LOCAL_ARP] = {
3235 /* pass local ARP pkts up to CPU */
3236 .eth_dst = zero_mac,
3237 .eth_dst_mask = zero_mac,
3238 .eth_type = htons(ETH_P_ARP),
3241 [ROCKER_CTRL_IPV4_MCAST] = {
3242 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3243 .eth_dst = ipv4_mcast,
3244 .eth_dst_mask = ipv4_mask,
3245 .eth_type = htons(ETH_P_IP),
3247 .copy_to_cpu = true,
3249 [ROCKER_CTRL_IPV6_MCAST] = {
3250 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3251 .eth_dst = ipv6_mcast,
3252 .eth_dst_mask = ipv6_mask,
3253 .eth_type = htons(ETH_P_IPV6),
3255 .copy_to_cpu = true,
3257 [ROCKER_CTRL_DFLT_BRIDGING] = {
3258 /* flood any pkts on vlan */
3260 .copy_to_cpu = true,
3264 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3265 enum switchdev_trans trans, int flags,
3266 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3268 u32 in_pport = rocker_port->pport;
3269 u32 in_pport_mask = 0xffffffff;
3271 const u8 *eth_src = NULL;
3272 const u8 *eth_src_mask = NULL;
3273 __be16 vlan_id_mask = htons(0xffff);
3275 u8 ip_proto_mask = 0;
3278 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3281 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3282 in_pport, in_pport_mask,
3283 eth_src, eth_src_mask,
3284 ctrl->eth_dst, ctrl->eth_dst_mask,
3286 vlan_id, vlan_id_mask,
3287 ip_proto, ip_proto_mask,
3288 ip_tos, ip_tos_mask,
3292 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3297 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3298 enum switchdev_trans trans, int flags,
3299 const struct rocker_ctrl *ctrl,
3302 enum rocker_of_dpa_table_id goto_tbl =
3303 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3304 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3308 if (!rocker_port_is_bridged(rocker_port))
3311 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3312 ctrl->eth_dst, ctrl->eth_dst_mask,
3314 goto_tbl, group_id, ctrl->copy_to_cpu);
3317 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3322 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3323 enum switchdev_trans trans, int flags,
3324 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3326 u32 in_pport_mask = 0xffffffff;
3327 __be16 vlan_id_mask = htons(0xffff);
3330 if (ntohs(vlan_id) == 0)
3331 vlan_id = rocker_port->internal_vlan_id;
3333 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3334 rocker_port->pport, in_pport_mask,
3335 ctrl->eth_type, ctrl->eth_dst,
3336 ctrl->eth_dst_mask, vlan_id,
3337 vlan_id_mask, ctrl->copy_to_cpu,
3341 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3346 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3347 enum switchdev_trans trans, int flags,
3348 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3351 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3354 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3358 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3364 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3365 enum switchdev_trans trans, int flags,
3371 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3372 if (rocker_port->ctrls[i]) {
3373 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3374 &rocker_ctrls[i], vlan_id);
3383 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3384 enum switchdev_trans trans, int flags,
3385 const struct rocker_ctrl *ctrl)
3390 for (vid = 1; vid < VLAN_N_VID; vid++) {
3391 if (!test_bit(vid, rocker_port->vlan_bitmap))
3393 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3402 static int rocker_port_vlan(struct rocker_port *rocker_port,
3403 enum switchdev_trans trans, int flags, u16 vid)
3405 enum rocker_of_dpa_table_id goto_tbl =
3406 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3407 u32 in_pport = rocker_port->pport;
3408 __be16 vlan_id = htons(vid);
3409 __be16 vlan_id_mask = htons(0xffff);
3410 __be16 internal_vlan_id;
3412 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3415 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3417 if (adding && test_bit(ntohs(internal_vlan_id),
3418 rocker_port->vlan_bitmap))
3419 return 0; /* already added */
3420 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3421 rocker_port->vlan_bitmap))
3422 return 0; /* already removed */
3424 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3427 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3430 netdev_err(rocker_port->dev,
3431 "Error (%d) port ctrl vlan add\n", err);
3436 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3437 internal_vlan_id, untagged);
3439 netdev_err(rocker_port->dev,
3440 "Error (%d) port VLAN l2 groups\n", err);
3444 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3447 netdev_err(rocker_port->dev,
3448 "Error (%d) port VLAN l2 flood group\n", err);
3452 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3453 in_pport, vlan_id, vlan_id_mask,
3454 goto_tbl, untagged, internal_vlan_id);
3456 netdev_err(rocker_port->dev,
3457 "Error (%d) port VLAN table\n", err);
3460 if (trans == SWITCHDEV_TRANS_PREPARE)
3461 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3466 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3467 enum switchdev_trans trans, int flags)
3469 enum rocker_of_dpa_table_id goto_tbl;
3474 /* Normal Ethernet Frames. Matches pkts from any local physical
3475 * ports. Goto VLAN tbl.
3476 */
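/* This is the entry point of the OF-DPA pipeline as used by this driver:
 * ingress port -> VLAN -> termination MAC -> bridging or unicast/multicast
 * routing -> ACL policy, with the ACL entries finally pointing at the
 * output groups.
 */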
3479 in_pport_mask = 0xffff0000;
3480 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3482 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3483 in_pport, in_pport_mask,
3486 netdev_err(rocker_port->dev,
3487 "Error (%d) ingress port table entry\n", err);
3492 struct rocker_fdb_learn_work {
3493 struct work_struct work;
3494 struct rocker_port *rocker_port;
3495 enum switchdev_trans trans;
3501 static void rocker_port_fdb_learn_work(struct work_struct *work)
3503 const struct rocker_fdb_learn_work *lw =
3504 container_of(work, struct rocker_fdb_learn_work, work);
3505 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3506 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3507 struct switchdev_notifier_fdb_info info;
3509 info.addr = lw->addr;
3512 if (learned && removing)
3513 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3514 lw->rocker_port->dev, &info.info);
3515 else if (learned && !removing)
3516 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3517 lw->rocker_port->dev, &info.info);
3519 rocker_port_kfree(lw->trans, work);
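/* Learned and aged-out addresses are pushed up to the bridge via the
 * switchdev FDB notifiers from a work item, so the software bridge's FDB
 * stays in sync with what the hardware has learned.  Only entries flagged
 * as learned generate notifications.
 */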
3522 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3523 enum switchdev_trans trans, int flags,
3524 const u8 *addr, __be16 vlan_id)
3526 struct rocker_fdb_learn_work *lw;
3527 enum rocker_of_dpa_table_id goto_tbl =
3528 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3529 u32 out_pport = rocker_port->pport;
3531 u32 group_id = ROCKER_GROUP_NONE;
3532 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3533 bool copy_to_cpu = false;
3536 if (rocker_port_is_bridged(rocker_port))
3537 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3539 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3540 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3541 NULL, vlan_id, tunnel_id, goto_tbl,
3542 group_id, copy_to_cpu);
3550 if (!rocker_port_is_bridged(rocker_port))
3553 lw = rocker_port_kzalloc(rocker_port, trans, sizeof(*lw));
3557 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3559 lw->rocker_port = rocker_port;
3562 ether_addr_copy(lw->addr, addr);
3563 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3565 if (trans == SWITCHDEV_TRANS_PREPARE)
3566 rocker_port_kfree(trans, lw);
3567 else
3568 schedule_work(&lw->work);
3573 static struct rocker_fdb_tbl_entry *
3574 rocker_fdb_tbl_find(const struct rocker *rocker,
3575 const struct rocker_fdb_tbl_entry *match)
3577 struct rocker_fdb_tbl_entry *found;
3579 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3580 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3586 static int rocker_port_fdb(struct rocker_port *rocker_port,
3587 enum switchdev_trans trans,
3588 const unsigned char *addr,
3589 __be16 vlan_id, int flags)
3591 struct rocker *rocker = rocker_port->rocker;
3592 struct rocker_fdb_tbl_entry *fdb;
3593 struct rocker_fdb_tbl_entry *found;
3594 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3595 unsigned long lock_flags;
3597 fdb = rocker_port_kzalloc(rocker_port, trans, sizeof(*fdb));
3601 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3602 fdb->key.pport = rocker_port->pport;
3603 ether_addr_copy(fdb->key.addr, addr);
3604 fdb->key.vlan_id = vlan_id;
3605 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3607 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3609 found = rocker_fdb_tbl_find(rocker, fdb);
3611 if (removing && found) {
3612 rocker_port_kfree(trans, fdb);
3613 if (trans != SWITCHDEV_TRANS_PREPARE)
3614 hash_del(&found->entry);
3615 } else if (!removing && !found) {
3616 if (trans != SWITCHDEV_TRANS_PREPARE)
3617 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3620 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3622 /* Check if adding and already exists, or removing and can't find */
3623 if (!found != !removing) {
3624 rocker_port_kfree(trans, fdb);
3625 if (!found && removing)
3627 /* Refreshing existing to update aging timers */
3628 flags |= ROCKER_OP_FLAG_REFRESH;
3631 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3634 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3635 enum switchdev_trans trans)
3637 struct rocker *rocker = rocker_port->rocker;
3638 struct rocker_fdb_tbl_entry *found;
3639 unsigned long lock_flags;
3640 int flags = ROCKER_OP_FLAG_REMOVE;
3641 struct hlist_node *tmp;
3645 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3646 rocker_port->stp_state == BR_STATE_FORWARDING)
3649 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3651 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3652 if (found->key.pport != rocker_port->pport)
3654 if (!found->learned)
3656 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3658 found->key.vlan_id);
3661 if (trans != SWITCHDEV_TRANS_PREPARE)
3662 hash_del(&found->entry);
3666 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3671 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3672 enum switchdev_trans trans, int flags,
3675 u32 in_pport_mask = 0xffffffff;
3677 const u8 *dst_mac_mask = ff_mac;
3678 __be16 vlan_id_mask = htons(0xffff);
3679 bool copy_to_cpu = false;
3682 if (ntohs(vlan_id) == 0)
3683 vlan_id = rocker_port->internal_vlan_id;
3685 eth_type = htons(ETH_P_IP);
3686 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3687 rocker_port->pport, in_pport_mask,
3688 eth_type, rocker_port->dev->dev_addr,
3689 dst_mac_mask, vlan_id, vlan_id_mask,
3690 copy_to_cpu, flags);
3694 eth_type = htons(ETH_P_IPV6);
3695 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3696 rocker_port->pport, in_pport_mask,
3697 eth_type, rocker_port->dev->dev_addr,
3698 dst_mac_mask, vlan_id, vlan_id_mask,
3699 copy_to_cpu, flags);
3704 static int rocker_port_fwding(struct rocker_port *rocker_port,
3705 enum switchdev_trans trans)
3714 /* Port will be forwarding-enabled if its STP state is LEARNING
3715 * or FORWARDING. Traffic from CPU can still egress, regardless of
3716 * port STP state. Use L2 interface group on port VLANs as a way
3717 * to toggle port forwarding: if forwarding is disabled, L2
3718 * interface group will not exist.
3721 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3722 rocker_port->stp_state != BR_STATE_FORWARDING)
3723 flags |= ROCKER_OP_FLAG_REMOVE;
3725 out_pport = rocker_port->pport;
3726 for (vid = 1; vid < VLAN_N_VID; vid++) {
3727 if (!test_bit(vid, rocker_port->vlan_bitmap))
3729 vlan_id = htons(vid);
3730 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3731 err = rocker_group_l2_interface(rocker_port, trans, flags,
3732 vlan_id, out_pport, pop_vlan);
3734 netdev_err(rocker_port->dev,
3735 "Error (%d) port VLAN l2 group for pport %d\n",
3744 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3745 enum switchdev_trans trans, u8 state)
3747 bool want[ROCKER_CTRL_MAX] = { 0, };
3748 bool prev_ctrls[ROCKER_CTRL_MAX];
3754 if (trans == SWITCHDEV_TRANS_PREPARE) {
3755 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3756 prev_state = rocker_port->stp_state;
3759 if (rocker_port->stp_state == state)
3762 rocker_port->stp_state = state;
3765 case BR_STATE_DISABLED:
3766 /* port is completely disabled */
3768 case BR_STATE_LISTENING:
3769 case BR_STATE_BLOCKING:
3770 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3772 case BR_STATE_LEARNING:
3773 case BR_STATE_FORWARDING:
3774 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3775 want[ROCKER_CTRL_IPV4_MCAST] = true;
3776 want[ROCKER_CTRL_IPV6_MCAST] = true;
3777 if (rocker_port_is_bridged(rocker_port))
3778 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3780 want[ROCKER_CTRL_LOCAL_ARP] = true;
3784 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3785 if (want[i] != rocker_port->ctrls[i]) {
3786 flags = (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3787 err = rocker_port_ctrl(rocker_port, trans, flags,
3791 rocker_port->ctrls[i] = want[i];
3795 err = rocker_port_fdb_flush(rocker_port, trans);
3799 err = rocker_port_fwding(rocker_port, trans);
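/* A PREPARE pass must not leave visible side effects, so restore the
 * ctrl bitmap and STP state saved above; the commit pass will redo the
 * work for real.
 */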
3802 if (trans == SWITCHDEV_TRANS_PREPARE) {
3803 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3804 rocker_port->stp_state = prev_state;
3810 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3811 enum switchdev_trans trans)
3813 if (rocker_port_is_bridged(rocker_port))
3814 /* bridge STP will enable port */
3817 /* port is not bridged, so simulate going to FORWARDING state */
3818 return rocker_port_stp_update(rocker_port, trans, BR_STATE_FORWARDING);
3821 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3822 enum switchdev_trans trans)
3824 if (rocker_port_is_bridged(rocker_port))
3825 /* bridge STP will disable port */
3828 /* port is not bridged, so simulate going to DISABLED state */
3829 return rocker_port_stp_update(rocker_port, trans, BR_STATE_DISABLED);
3832 static struct rocker_internal_vlan_tbl_entry *
3833 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3835 struct rocker_internal_vlan_tbl_entry *found;
3837 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3839 if (found->ifindex == ifindex)
3846 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3849 struct rocker *rocker = rocker_port->rocker;
3850 struct rocker_internal_vlan_tbl_entry *entry;
3851 struct rocker_internal_vlan_tbl_entry *found;
3852 unsigned long lock_flags;
3855 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3859 entry->ifindex = ifindex;
3861 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3863 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3870 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3872 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3873 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3875 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3879 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3883 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3885 return found->vlan_id;
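/* Internal VLAN IDs are what untagged traffic is mapped to on ports that
 * have no 802.1Q VLAN configured.  They are allocated per ifindex from a
 * small bitmap starting at ROCKER_INTERNAL_VLAN_ID_BASE and reference
 * counted, so repeated gets for the same ifindex (e.g. ports sharing one
 * bridge) return the same ID.
 */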
3889 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3892 struct rocker *rocker = rocker_port->rocker;
3893 struct rocker_internal_vlan_tbl_entry *found;
3894 unsigned long lock_flags;
3897 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3899 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3901 netdev_err(rocker_port->dev,
3902 "ifindex (%d) not found in internal VLAN tbl\n",
3907 if (--found->ref_count <= 0) {
3908 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3909 clear_bit(bit, rocker->internal_vlan_bitmap);
3910 hash_del(&found->entry);
3915 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3918 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3919 enum switchdev_trans trans, __be32 dst,
3920 int dst_len, const struct fib_info *fi,
3921 u32 tb_id, int flags)
3923 const struct fib_nh *nh;
3924 __be16 eth_type = htons(ETH_P_IP);
3925 __be32 dst_mask = inet_make_mask(dst_len);
3926 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3927 u32 priority = fi->fib_priority;
3928 enum rocker_of_dpa_table_id goto_tbl =
3929 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3936 /* XXX support ECMP */
3939 nh_on_port = (fi->fib_dev == rocker_port->dev);
3940 has_gw = !!nh->nh_gw;
3942 if (has_gw && nh_on_port) {
3943 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
3948 group_id = ROCKER_GROUP_L3_UNICAST(index);
3950 /* Send to CPU for processing */
3951 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3954 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
3955 dst_mask, priority, goto_tbl,
3958 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
3968 static int rocker_port_open(struct net_device *dev)
3970 struct rocker_port *rocker_port = netdev_priv(dev);
3973 err = rocker_port_dma_rings_init(rocker_port);
3977 err = request_irq(rocker_msix_tx_vector(rocker_port),
3978 rocker_tx_irq_handler, 0,
3979 rocker_driver_name, rocker_port);
3981 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3982 goto err_request_tx_irq;
3985 err = request_irq(rocker_msix_rx_vector(rocker_port),
3986 rocker_rx_irq_handler, 0,
3987 rocker_driver_name, rocker_port);
3989 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3990 goto err_request_rx_irq;
3993 err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
3995 goto err_fwd_enable;
3997 napi_enable(&rocker_port->napi_tx);
3998 napi_enable(&rocker_port->napi_rx);
3999 rocker_port_set_enable(rocker_port, true);
4000 netif_start_queue(dev);
4004 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4006 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4008 rocker_port_dma_rings_fini(rocker_port);
4012 static int rocker_port_stop(struct net_device *dev)
4014 struct rocker_port *rocker_port = netdev_priv(dev);
4016 netif_stop_queue(dev);
4017 rocker_port_set_enable(rocker_port, false);
4018 napi_disable(&rocker_port->napi_rx);
4019 napi_disable(&rocker_port->napi_tx);
4020 rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE);
4021 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4022 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4023 rocker_port_dma_rings_fini(rocker_port);
4028 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4029 const struct rocker_desc_info *desc_info)
4031 const struct rocker *rocker = rocker_port->rocker;
4032 struct pci_dev *pdev = rocker->pdev;
4033 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4034 struct rocker_tlv *attr;
4037 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4038 if (!attrs[ROCKER_TLV_TX_FRAGS])
4040 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4041 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4042 dma_addr_t dma_handle;
4045 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4047 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4049 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4050 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4052 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4053 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4054 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4058 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4059 struct rocker_desc_info *desc_info,
4060 char *buf, size_t buf_len)
4062 const struct rocker *rocker = rocker_port->rocker;
4063 struct pci_dev *pdev = rocker->pdev;
4064 dma_addr_t dma_handle;
4065 struct rocker_tlv *frag;
4067 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4068 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4069 if (net_ratelimit())
4070 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4073 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4076 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4079 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4082 rocker_tlv_nest_end(desc_info, frag);
4086 rocker_tlv_nest_cancel(desc_info, frag);
4088 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
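/* A transmitted skb is described to the device as a nest of
 * ROCKER_TLV_TX_FRAG attributes, each holding a DMA address and length:
 * the linear part first, then one fragment per skb frag, up to
 * ROCKER_TX_FRAGS_MAX.  The descriptor is then posted to the tx ring and
 * reclaimed in rocker_port_poll_tx().
 */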
4092 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4094 struct rocker_port *rocker_port = netdev_priv(dev);
4095 struct rocker *rocker = rocker_port->rocker;
4096 struct rocker_desc_info *desc_info;
4097 struct rocker_tlv *frags;
4101 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4102 if (unlikely(!desc_info)) {
4103 if (net_ratelimit())
4104 netdev_err(dev, "tx ring full when queue awake\n");
4105 return NETDEV_TX_BUSY;
4108 rocker_desc_cookie_ptr_set(desc_info, skb);
4110 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4113 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4114 skb->data, skb_headlen(skb));
4117 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
4120 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4121 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4123 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4124 skb_frag_address(frag),
4125 skb_frag_size(frag));
4129 rocker_tlv_nest_end(desc_info, frags);
4131 rocker_desc_gen_clear(desc_info);
4132 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4134 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4136 netif_stop_queue(dev);
4138 return NETDEV_TX_OK;
4141 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4143 rocker_tlv_nest_cancel(desc_info, frags);
4146 dev->stats.tx_dropped++;
4148 return NETDEV_TX_OK;
4151 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4153 struct sockaddr *addr = p;
4154 struct rocker_port *rocker_port = netdev_priv(dev);
4157 if (!is_valid_ether_addr(addr->sa_data))
4158 return -EADDRNOTAVAIL;
4160 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4163 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4167 static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
4168 __be16 proto, u16 vid)
4170 struct rocker_port *rocker_port = netdev_priv(dev);
4173 err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, vid);
4177 return rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4181 static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
4182 __be16 proto, u16 vid)
4184 struct rocker_port *rocker_port = netdev_priv(dev);
4187 err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4188 ROCKER_OP_FLAG_REMOVE, htons(vid));
4192 return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
4193 ROCKER_OP_FLAG_REMOVE, vid);
4196 static int rocker_port_get_phys_port_name(struct net_device *dev,
4197 char *buf, size_t len)
4199 struct rocker_port *rocker_port = netdev_priv(dev);
4200 struct port_name name = { .buf = buf, .len = len };
4203 err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
4204 rocker_cmd_get_port_settings_prep, NULL,
4205 rocker_cmd_get_port_settings_phys_name_proc,
4208 return err ? -EOPNOTSUPP : 0;
4211 static const struct net_device_ops rocker_port_netdev_ops = {
4212 .ndo_open = rocker_port_open,
4213 .ndo_stop = rocker_port_stop,
4214 .ndo_start_xmit = rocker_port_xmit,
4215 .ndo_set_mac_address = rocker_port_set_mac_address,
4216 .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
4217 .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
4218 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
4219 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
4220 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
4221 .ndo_fdb_add = switchdev_port_fdb_add,
4222 .ndo_fdb_del = switchdev_port_fdb_del,
4223 .ndo_fdb_dump = switchdev_port_fdb_dump,
4224 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
4227 /********************
4228 * switchdev interface
4229 ********************/
4231 static int rocker_port_attr_get(struct net_device *dev,
4232 struct switchdev_attr *attr)
4234 const struct rocker_port *rocker_port = netdev_priv(dev);
4235 const struct rocker *rocker = rocker_port->rocker;
4238 case SWITCHDEV_ATTR_PORT_PARENT_ID:
4239 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4240 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4242 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4243 attr->u.brport_flags = rocker_port->brport_flags;
4252 static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
4254 struct list_head *mem, *tmp;
4256 list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
4262 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4263 enum switchdev_trans trans,
4264 unsigned long brport_flags)
4266 unsigned long orig_flags;
4269 orig_flags = rocker_port->brport_flags;
4270 rocker_port->brport_flags = brport_flags;
4271 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4272 err = rocker_port_set_learning(rocker_port, trans);
4274 if (trans == SWITCHDEV_TRANS_PREPARE)
4275 rocker_port->brport_flags = orig_flags;
4280 static int rocker_port_attr_set(struct net_device *dev,
4281 struct switchdev_attr *attr)
4283 struct rocker_port *rocker_port = netdev_priv(dev);
4286 switch (attr->trans) {
4287 case SWITCHDEV_TRANS_PREPARE:
4288 BUG_ON(!list_empty(&rocker_port->trans_mem));
4290 case SWITCHDEV_TRANS_ABORT:
4291 rocker_port_trans_abort(rocker_port);
4298 case SWITCHDEV_ATTR_PORT_STP_STATE:
4299 err = rocker_port_stp_update(rocker_port, attr->trans,
4302 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4303 err = rocker_port_brport_flags_set(rocker_port, attr->trans,
4304 attr->u.brport_flags);
4314 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4315 enum switchdev_trans trans, u16 vid, u16 flags)
4319 /* XXX deal with flags for PVID and untagged */
4321 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4325 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4327 rocker_port_vlan(rocker_port, trans,
4328 ROCKER_OP_FLAG_REMOVE, vid);
4333 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4334 enum switchdev_trans trans,
4335 const struct switchdev_obj_vlan *vlan)
4340 for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
4341 err = rocker_port_vlan_add(rocker_port, trans,
4350 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4351 enum switchdev_trans trans,
4352 const struct switchdev_obj_fdb *fdb)
4354 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4357 if (!rocker_port_is_bridged(rocker_port))
4360 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4363 static int rocker_port_obj_add(struct net_device *dev,
4364 struct switchdev_obj *obj)
4366 struct rocker_port *rocker_port = netdev_priv(dev);
4367 const struct switchdev_obj_ipv4_fib *fib4;
4370 switch (obj->trans) {
4371 case SWITCHDEV_TRANS_PREPARE:
4372 BUG_ON(!list_empty(&rocker_port->trans_mem));
4374 case SWITCHDEV_TRANS_ABORT:
4375 rocker_port_trans_abort(rocker_port);
4382 case SWITCHDEV_OBJ_PORT_VLAN:
4383 err = rocker_port_vlans_add(rocker_port, obj->trans,
4386 case SWITCHDEV_OBJ_IPV4_FIB:
4387 fib4 = &obj->u.ipv4_fib;
4388 err = rocker_port_fib_ipv4(rocker_port, obj->trans,
4389 htonl(fib4->dst), fib4->dst_len,
4390 fib4->fi, fib4->tb_id, 0);
4392 case SWITCHDEV_OBJ_PORT_FDB:
4393 err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
4403 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4408 err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4409 ROCKER_OP_FLAG_REMOVE, htons(vid));
4413 return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
4414 ROCKER_OP_FLAG_REMOVE, vid);
4417 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4418 const struct switchdev_obj_vlan *vlan)
4423 for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
4424 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4432 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4433 enum switchdev_trans trans,
4434 const struct switchdev_obj_fdb *fdb)
4436 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4437 int flags = ROCKER_OP_FLAG_REMOVE;
4439 if (!rocker_port_is_bridged(rocker_port))
4442 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4445 static int rocker_port_obj_del(struct net_device *dev,
4446 struct switchdev_obj *obj)
4448 struct rocker_port *rocker_port = netdev_priv(dev);
4449 const struct switchdev_obj_ipv4_fib *fib4;
4453 case SWITCHDEV_OBJ_PORT_VLAN:
4454 err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
4456 case SWITCHDEV_OBJ_IPV4_FIB:
4457 fib4 = &obj->u.ipv4_fib;
4458 err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
4459 htonl(fib4->dst), fib4->dst_len,
4460 fib4->fi, fib4->tb_id,
4461 ROCKER_OP_FLAG_REMOVE);
4463 case SWITCHDEV_OBJ_PORT_FDB:
4464 err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
4474 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4475 struct switchdev_obj *obj)
4477 struct rocker *rocker = rocker_port->rocker;
4478 struct switchdev_obj_fdb *fdb = &obj->u.fdb;
4479 struct rocker_fdb_tbl_entry *found;
4480 struct hlist_node *tmp;
4481 unsigned long lock_flags;
4485 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4486 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4487 if (found->key.pport != rocker_port->pport)
4489 fdb->addr = found->key.addr;
4490 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4491 found->key.vlan_id);
4492 err = obj->cb(rocker_port->dev, obj);
4496 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4501 static int rocker_port_obj_dump(struct net_device *dev,
4502 struct switchdev_obj *obj)
4504 const struct rocker_port *rocker_port = netdev_priv(dev);
4508 case SWITCHDEV_OBJ_PORT_FDB:
4509 err = rocker_port_fdb_dump(rocker_port, obj);
4519 static const struct switchdev_ops rocker_port_switchdev_ops = {
4520 .switchdev_port_attr_get = rocker_port_attr_get,
4521 .switchdev_port_attr_set = rocker_port_attr_set,
4522 .switchdev_port_obj_add = rocker_port_obj_add,
4523 .switchdev_port_obj_del = rocker_port_obj_del,
4524 .switchdev_port_obj_dump = rocker_port_obj_dump,
4527 /********************
4528 * ethtool interface
4529 ********************/
4531 static int rocker_port_get_settings(struct net_device *dev,
4532 struct ethtool_cmd *ecmd)
4534 struct rocker_port *rocker_port = netdev_priv(dev);
4536 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4539 static int rocker_port_set_settings(struct net_device *dev,
4540 struct ethtool_cmd *ecmd)
4542 struct rocker_port *rocker_port = netdev_priv(dev);
4544 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4547 static void rocker_port_get_drvinfo(struct net_device *dev,
4548 struct ethtool_drvinfo *drvinfo)
4550 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4551 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4554 static struct rocker_port_stats {
4555 char str[ETH_GSTRING_LEN];
4557 } rocker_port_stats[] = {
4558 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4559 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4560 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4561 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4563 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4564 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4565 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4566 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4569 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
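/* Ethtool stats are fetched from the device: get_strings exposes the
 * names in rocker_port_stats[], and get_ethtool_stats issues a
 * GET_PORT_STATS command for this pport and copies the returned u64
 * counters back in the same order.
 */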
4571 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4577 switch (stringset) {
4579 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4580 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4581 p += ETH_GSTRING_LEN;
4588 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4589 struct rocker_desc_info *desc_info,
4592 struct rocker_tlv *cmd_stats;
4594 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4595 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4598 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4602 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4603 rocker_port->pport))
4606 rocker_tlv_nest_end(desc_info, cmd_stats);
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}
static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}
static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};
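/* The counters exposed above can be read from userspace with, for example,
 * "ethtool -S <port netdev>" (illustrative command; netdev names vary).
 */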
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
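/* TX NAPI poll: reclaim completed tx descriptors, account packet/byte and
 * error stats, and return the freed descriptors (credits) to the tx ring.
 */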
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}
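/* RX NAPI poll: consume up to "budget" rx descriptors, hand the skbs to
 * the stack, and recycle each descriptor back to the rx ring.
 */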
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}
static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}
static void rocker_remove_ports(const struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
				   ROCKER_OP_FLAG_REMOVE);
		unregister_netdev(rocker_port->dev);
	}
	kfree(rocker->ports);
}
static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	INIT_LIST_HEAD(&rocker_port->trans_mem);

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);

	err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
	if (err) {
		dev_err(&pdev->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				   untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
			   ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}
static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}
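/* MSI-X setup: the device must report exactly ROCKER_MSIX_VEC_COUNT()
 * vectors for the configured port count; any other count is rejected.
 */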
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}
static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}
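/* Probe order: enable the PCI device, map BAR0, set up MSI-X, run the
 * basic hardware test, reset the device, initialize DMA rings and IRQs,
 * initialize the flow/group tables, and finally create the port netdevs.
 */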
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);

	return 0;

err_probe_ports:
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
/************************************
 * Net device notifier event handler
 ************************************/
static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}
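/* Called when a rocker port is enslaved to a Linux bridge, e.g. via
 * "ip link set dev <port netdev> master br0" (illustrative command).
 * The port moves from its own internal VLAN to the bridge's internal VLAN.
 */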
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;

	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				    untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				   untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);

	return err;
}
static int rocker_port_master_changed(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct net_device *master = netdev_master_upper_dev_get(dev);
	int err = 0;

	/* There are currently three cases handled here:
	 * 1. Joining a bridge
	 * 2. Leaving a previously joined bridge
	 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
	 *    in which case nothing is done
	 */
	if (master && master->rtnl_link_ops &&
	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);

	return err;
}
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		dev = netdev_notifier_info_to_dev(ptr);
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_port_master_changed(dev);
		if (err)
			netdev_warn(dev,
				    "failed to reflect master change (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
/************************************
 * Net event notifier event handler
 ************************************/
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
				      flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
/***********************
 * Module init and exit
 ***********************/
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}
static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);