rocker: push struct switchdev_trans down through rocker code (drivers/net/ethernet/rocker/rocker.c)
1 /*
2  * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3  * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
38 #include <net/arp.h>
39 #include <asm-generic/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
41
42 #include "rocker.h"
43
44 static const char rocker_driver_name[] = "rocker";
45
46 static const struct pci_device_id rocker_pci_id_table[] = {
47         {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
48         {0, }
49 };
50
51 struct rocker_flow_tbl_key {
52         u32 priority;
53         enum rocker_of_dpa_table_id tbl_id;
54         union {
55                 struct {
56                         u32 in_pport;
57                         u32 in_pport_mask;
58                         enum rocker_of_dpa_table_id goto_tbl;
59                 } ig_port;
60                 struct {
61                         u32 in_pport;
62                         __be16 vlan_id;
63                         __be16 vlan_id_mask;
64                         enum rocker_of_dpa_table_id goto_tbl;
65                         bool untagged;
66                         __be16 new_vlan_id;
67                 } vlan;
68                 struct {
69                         u32 in_pport;
70                         u32 in_pport_mask;
71                         __be16 eth_type;
72                         u8 eth_dst[ETH_ALEN];
73                         u8 eth_dst_mask[ETH_ALEN];
74                         __be16 vlan_id;
75                         __be16 vlan_id_mask;
76                         enum rocker_of_dpa_table_id goto_tbl;
77                         bool copy_to_cpu;
78                 } term_mac;
79                 struct {
80                         __be16 eth_type;
81                         __be32 dst4;
82                         __be32 dst4_mask;
83                         enum rocker_of_dpa_table_id goto_tbl;
84                         u32 group_id;
85                 } ucast_routing;
86                 struct {
87                         u8 eth_dst[ETH_ALEN];
88                         u8 eth_dst_mask[ETH_ALEN];
89                         int has_eth_dst;
90                         int has_eth_dst_mask;
91                         __be16 vlan_id;
92                         u32 tunnel_id;
93                         enum rocker_of_dpa_table_id goto_tbl;
94                         u32 group_id;
95                         bool copy_to_cpu;
96                 } bridge;
97                 struct {
98                         u32 in_pport;
99                         u32 in_pport_mask;
100                         u8 eth_src[ETH_ALEN];
101                         u8 eth_src_mask[ETH_ALEN];
102                         u8 eth_dst[ETH_ALEN];
103                         u8 eth_dst_mask[ETH_ALEN];
104                         __be16 eth_type;
105                         __be16 vlan_id;
106                         __be16 vlan_id_mask;
107                         u8 ip_proto;
108                         u8 ip_proto_mask;
109                         u8 ip_tos;
110                         u8 ip_tos_mask;
111                         u32 group_id;
112                 } acl;
113         };
114 };
115
116 struct rocker_flow_tbl_entry {
117         struct hlist_node entry;
118         u32 cmd;
119         u64 cookie;
120         struct rocker_flow_tbl_key key;
121         size_t key_len;
122         u32 key_crc32; /* key */
123 };
124
125 struct rocker_group_tbl_entry {
126         struct hlist_node entry;
127         u32 cmd;
128         u32 group_id; /* key */
129         u16 group_count;
130         u32 *group_ids;
131         union {
132                 struct {
133                         u8 pop_vlan;
134                 } l2_interface;
135                 struct {
136                         u8 eth_src[ETH_ALEN];
137                         u8 eth_dst[ETH_ALEN];
138                         __be16 vlan_id;
139                         u32 group_id;
140                 } l2_rewrite;
141                 struct {
142                         u8 eth_src[ETH_ALEN];
143                         u8 eth_dst[ETH_ALEN];
144                         __be16 vlan_id;
145                         bool ttl_check;
146                         u32 group_id;
147                 } l3_unicast;
148         };
149 };
150
151 struct rocker_fdb_tbl_entry {
152         struct hlist_node entry;
153         u32 key_crc32; /* key */
154         bool learned;
155         unsigned long touched;
156         struct rocker_fdb_tbl_key {
157                 struct rocker_port *rocker_port;
158                 u8 addr[ETH_ALEN];
159                 __be16 vlan_id;
160         } key;
161 };
162
163 struct rocker_internal_vlan_tbl_entry {
164         struct hlist_node entry;
165         int ifindex; /* key */
166         u32 ref_count;
167         __be16 vlan_id;
168 };
169
170 struct rocker_neigh_tbl_entry {
171         struct hlist_node entry;
172         __be32 ip_addr; /* key */
173         struct net_device *dev;
174         u32 ref_count;
175         u32 index;
176         u8 eth_dst[ETH_ALEN];
177         bool ttl_check;
178 };
179
180 struct rocker_desc_info {
181         char *data; /* mapped */
182         size_t data_size;
183         size_t tlv_size;
184         struct rocker_desc *desc;
185         dma_addr_t mapaddr;
186 };
187
188 struct rocker_dma_ring_info {
189         size_t size;
190         u32 head;
191         u32 tail;
192         struct rocker_desc *desc; /* mapped */
193         dma_addr_t mapaddr;
194         struct rocker_desc_info *desc_info;
195         unsigned int type;
196 };
197
198 struct rocker;
199
200 enum {
201         ROCKER_CTRL_LINK_LOCAL_MCAST,
202         ROCKER_CTRL_LOCAL_ARP,
203         ROCKER_CTRL_IPV4_MCAST,
204         ROCKER_CTRL_IPV6_MCAST,
205         ROCKER_CTRL_DFLT_BRIDGING,
206         ROCKER_CTRL_DFLT_OVS,
207         ROCKER_CTRL_MAX,
208 };
209
210 #define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
211 #define ROCKER_N_INTERNAL_VLANS         255
212 #define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
213 #define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
214
215 struct rocker_port {
216         struct net_device *dev;
217         struct net_device *bridge_dev;
218         struct rocker *rocker;
219         unsigned int port_number;
220         u32 pport;
221         __be16 internal_vlan_id;
222         int stp_state;
223         u32 brport_flags;
224         unsigned long ageing_time;
225         bool ctrls[ROCKER_CTRL_MAX];
226         unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
227         struct napi_struct napi_tx;
228         struct napi_struct napi_rx;
229         struct rocker_dma_ring_info tx_ring;
230         struct rocker_dma_ring_info rx_ring;
231         struct list_head trans_mem;
232 };
233
234 struct rocker {
235         struct pci_dev *pdev;
236         u8 __iomem *hw_addr;
237         struct msix_entry *msix_entries;
238         unsigned int port_count;
239         struct rocker_port **ports;
240         struct {
241                 u64 id;
242         } hw;
243         spinlock_t cmd_ring_lock;               /* for cmd ring accesses */
244         struct rocker_dma_ring_info cmd_ring;
245         struct rocker_dma_ring_info event_ring;
246         DECLARE_HASHTABLE(flow_tbl, 16);
247         spinlock_t flow_tbl_lock;               /* for flow tbl accesses */
248         u64 flow_tbl_next_cookie;
249         DECLARE_HASHTABLE(group_tbl, 16);
250         spinlock_t group_tbl_lock;              /* for group tbl accesses */
251         struct timer_list fdb_cleanup_timer;
252         DECLARE_HASHTABLE(fdb_tbl, 16);
253         spinlock_t fdb_tbl_lock;                /* for fdb tbl accesses */
254         unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
255         DECLARE_HASHTABLE(internal_vlan_tbl, 8);
256         spinlock_t internal_vlan_tbl_lock;      /* for vlan tbl accesses */
257         DECLARE_HASHTABLE(neigh_tbl, 16);
258         spinlock_t neigh_tbl_lock;              /* for neigh tbl accesses */
259         u32 neigh_tbl_next_index;
260 };
261
262 static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
263 static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
264 static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
265 static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
266 static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
267 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
268 static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
269 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
270 static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
271
272 /* Rocker priority levels for flow table entries.  Higher
273  * priority match takes precedence over lower priority match.
274  */
275
276 enum {
277         ROCKER_PRIORITY_UNKNOWN = 0,
278         ROCKER_PRIORITY_IG_PORT = 1,
279         ROCKER_PRIORITY_VLAN = 1,
280         ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
281         ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
282         ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
283         ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
284         ROCKER_PRIORITY_BRIDGING_VLAN = 3,
285         ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
286         ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
287         ROCKER_PRIORITY_BRIDGING_TENANT = 3,
288         ROCKER_PRIORITY_ACL_CTRL = 3,
289         ROCKER_PRIORITY_ACL_NORMAL = 2,
290         ROCKER_PRIORITY_ACL_DFLT = 1,
291 };
292
293 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
294 {
295         u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
296         u16 end = 0xffe;
297         u16 _vlan_id = ntohs(vlan_id);
298
299         return (_vlan_id >= start && _vlan_id <= end);
300 }
301
302 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
303                                       u16 vid, bool *pop_vlan)
304 {
305         __be16 vlan_id;
306
307         if (pop_vlan)
308                 *pop_vlan = false;
309         vlan_id = htons(vid);
310         if (!vlan_id) {
311                 vlan_id = rocker_port->internal_vlan_id;
312                 if (pop_vlan)
313                         *pop_vlan = true;
314         }
315
316         return vlan_id;
317 }
318
319 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
320                                    __be16 vlan_id)
321 {
322         if (rocker_vlan_id_is_internal(vlan_id))
323                 return 0;
324
325         return ntohs(vlan_id);
326 }
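
/* Example (illustrative, not part of the driver): an untagged packet
 * (vid 0) maps to the port's internal VLAN and is marked for popping on
 * egress; a tagged vid passes through unchanged:
 *
 *	bool pop_vlan;
 *	__be16 vlan_id;
 *
 *	vlan_id = rocker_port_vid_to_vlan(rocker_port, 0, &pop_vlan);
 *		(vlan_id == rocker_port->internal_vlan_id, pop_vlan == true)
 *	vlan_id = rocker_port_vid_to_vlan(rocker_port, 100, &pop_vlan);
 *		(vlan_id == htons(100), pop_vlan == false)
 *
 * rocker_port_vlan_to_vid() reverses the mapping; internal VLAN ids map
 * back to vid 0.
 */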
327
328 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
329 {
330         return rocker_port->bridge_dev &&
331                netif_is_bridge_master(rocker_port->bridge_dev);
332 }
333
334 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
335 {
336         return rocker_port->bridge_dev &&
337                netif_is_ovs_master(rocker_port->bridge_dev);
338 }
339
340 #define ROCKER_OP_FLAG_REMOVE           BIT(0)
341 #define ROCKER_OP_FLAG_NOWAIT           BIT(1)
342 #define ROCKER_OP_FLAG_LEARNED          BIT(2)
343 #define ROCKER_OP_FLAG_REFRESH          BIT(3)
344
345 static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
346                                      struct switchdev_trans *trans, int flags,
347                                      size_t size)
348 {
349         struct list_head *elem = NULL;
350         gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
351                           GFP_ATOMIC : GFP_KERNEL;
352
353         /* If in transaction prepare phase, allocate the memory
354          * and enqueue it on a per-port list.  If in transaction
355          * commit phase, dequeue the memory from the per-port list
356          * rather than re-allocating the memory.  The idea is that the
357          * driver code paths for prepare and commit are identical,
358          * so the memory allocated in the prepare phase is the
359          * memory used in the commit phase.
360          */
361
362         if (!trans) {
363                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
364                 if (elem)
365                         INIT_LIST_HEAD(elem);
366         } else if (switchdev_trans_ph_prepare(trans)) {
367                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
368                 if (!elem)
369                         return NULL;
370                 list_add_tail(elem, &rocker_port->trans_mem);
371         } else {
372                 BUG_ON(list_empty(&rocker_port->trans_mem));
373                 elem = rocker_port->trans_mem.next;
374                 list_del_init(elem);
375         }
376
377         return elem ? elem + 1 : NULL;
378 }
379
380 static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
381                                  struct switchdev_trans *trans, int flags,
382                                  size_t size)
383 {
384         return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
385 }
386
387 static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
388                                  struct switchdev_trans *trans, int flags,
389                                  size_t n, size_t size)
390 {
391         return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
392 }
393
394 static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
395 {
396         struct list_head *elem;
397
398         /* Frees are ignored if in transaction prepare phase.  The
399          * memory remains on the per-port list until freed in the
400          * commit phase.
401          */
402
403         if (switchdev_trans_ph_prepare(trans))
404                 return;
405
406         elem = (struct list_head *)mem - 1;
407         BUG_ON(!list_empty(elem));
408         kfree(elem);
409 }
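
/* Example (illustrative sketch; struct my_entry is hypothetical): a
 * switchdev prepare/commit pair runs the same code path twice.  The
 * allocation made in the prepare phase is dequeued and reused in the
 * commit phase, so the commit phase cannot fail with -ENOMEM:
 *
 *	struct my_entry *entry;
 *
 *	entry = rocker_port_kzalloc(rocker_port, trans, flags,
 *				    sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;		(only possible during prepare)
 *	...
 *	rocker_port_kfree(trans, entry);	(deferred during prepare)
 */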
410
411 struct rocker_wait {
412         wait_queue_head_t wait;
413         bool done;
414         bool nowait;
415 };
416
417 static void rocker_wait_reset(struct rocker_wait *wait)
418 {
419         wait->done = false;
420         wait->nowait = false;
421 }
422
423 static void rocker_wait_init(struct rocker_wait *wait)
424 {
425         init_waitqueue_head(&wait->wait);
426         rocker_wait_reset(wait);
427 }
428
429 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
430                                               struct switchdev_trans *trans,
431                                               int flags)
432 {
433         struct rocker_wait *wait;
434
435         wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
436         if (!wait)
437                 return NULL;
438         rocker_wait_init(wait);
439         return wait;
440 }
441
442 static void rocker_wait_destroy(struct switchdev_trans *trans,
443                                 struct rocker_wait *wait)
444 {
445         rocker_port_kfree(trans, wait);
446 }
447
448 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
449                                       unsigned long timeout)
450 {
451         wait_event_timeout(wait->wait, wait->done, timeout);
452         if (!wait->done)
453                 return false;
454         return true;
455 }
456
457 static void rocker_wait_wake_up(struct rocker_wait *wait)
458 {
459         wait->done = true;
460         wake_up(&wait->wait);
461 }
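
/* Example (illustrative sketch): the typical completion-wait pattern
 * used by the hw tests and the command ring below: reset the wait,
 * trigger the hardware, then block until the IRQ handler calls
 * rocker_wait_wake_up():
 *
 *	rocker_wait_reset(&wait);
 *	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
 *	if (!rocker_wait_event_timeout(&wait, HZ / 10))
 *		return -EIO;
 */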
462
463 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
464 {
465         return rocker->msix_entries[vector].vector;
466 }
467
468 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
469 {
470         return rocker_msix_vector(rocker_port->rocker,
471                                   ROCKER_MSIX_VEC_TX(rocker_port->port_number));
472 }
473
474 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
475 {
476         return rocker_msix_vector(rocker_port->rocker,
477                                   ROCKER_MSIX_VEC_RX(rocker_port->port_number));
478 }
479
480 #define rocker_write32(rocker, reg, val)        \
481         writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
482 #define rocker_read32(rocker, reg)      \
483         readl((rocker)->hw_addr + (ROCKER_ ## reg))
484 #define rocker_write64(rocker, reg, val)        \
485         writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
486 #define rocker_read64(rocker, reg)      \
487         readq((rocker)->hw_addr + (ROCKER_ ## reg))
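
/* Example (illustrative): the reg argument is token-pasted with the
 * ROCKER_ prefix, so
 *
 *	rocker_write32(rocker, TEST_REG, val);
 *
 * expands to
 *
 *	writel((val), (rocker)->hw_addr + ROCKER_TEST_REG);
 */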
488
489 /*****************************
490  * HW basic testing functions
491  *****************************/
492
493 static int rocker_reg_test(const struct rocker *rocker)
494 {
495         const struct pci_dev *pdev = rocker->pdev;
496         u64 test_reg;
497         u64 rnd;
498
499         rnd = prandom_u32();
500         rnd >>= 1;
501         rocker_write32(rocker, TEST_REG, rnd);
502         test_reg = rocker_read32(rocker, TEST_REG);
503         if (test_reg != rnd * 2) {
504                 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
505                         test_reg, rnd * 2);
506                 return -EIO;
507         }
508
509         rnd = prandom_u32();
510         rnd <<= 31;
511         rnd |= prandom_u32();
512         rocker_write64(rocker, TEST_REG64, rnd);
513         test_reg = rocker_read64(rocker, TEST_REG64);
514         if (test_reg != rnd * 2) {
515                 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
516                         test_reg, rnd * 2);
517                 return -EIO;
518         }
519
520         return 0;
521 }
522
523 static int rocker_dma_test_one(const struct rocker *rocker,
524                                struct rocker_wait *wait, u32 test_type,
525                                dma_addr_t dma_handle, const unsigned char *buf,
526                                const unsigned char *expect, size_t size)
527 {
528         const struct pci_dev *pdev = rocker->pdev;
529         int i;
530
531         rocker_wait_reset(wait);
532         rocker_write32(rocker, TEST_DMA_CTRL, test_type);
533
534         if (!rocker_wait_event_timeout(wait, HZ / 10)) {
535                 dev_err(&pdev->dev, "no interrupt received within the timeout\n");
536                 return -EIO;
537         }
538
539         for (i = 0; i < size; i++) {
540                 if (buf[i] != expect[i]) {
541                         dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
542                                 buf[i], i, expect[i]);
543                         return -EIO;
544                 }
545         }
546         return 0;
547 }
548
549 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
550 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
551
552 static int rocker_dma_test_offset(const struct rocker *rocker,
553                                   struct rocker_wait *wait, int offset)
554 {
555         struct pci_dev *pdev = rocker->pdev;
556         unsigned char *alloc;
557         unsigned char *buf;
558         unsigned char *expect;
559         dma_addr_t dma_handle;
560         int i;
561         int err;
562
563         alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
564                         GFP_KERNEL | GFP_DMA);
565         if (!alloc)
566                 return -ENOMEM;
567         buf = alloc + offset;
568         expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
569
570         dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
571                                     PCI_DMA_BIDIRECTIONAL);
572         if (pci_dma_mapping_error(pdev, dma_handle)) {
573                 err = -EIO;
574                 goto free_alloc;
575         }
576
577         rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
578         rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
579
580         memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
581         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
582                                   dma_handle, buf, expect,
583                                   ROCKER_TEST_DMA_BUF_SIZE);
584         if (err)
585                 goto unmap;
586
587         memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
588         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
589                                   dma_handle, buf, expect,
590                                   ROCKER_TEST_DMA_BUF_SIZE);
591         if (err)
592                 goto unmap;
593
594         prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
595         for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
596                 expect[i] = ~buf[i];
597         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
598                                   dma_handle, buf, expect,
599                                   ROCKER_TEST_DMA_BUF_SIZE);
600         if (err)
601                 goto unmap;
602
603 unmap:
604         pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
605                          PCI_DMA_BIDIRECTIONAL);
606 free_alloc:
607         kfree(alloc);
608
609         return err;
610 }
611
612 static int rocker_dma_test(const struct rocker *rocker,
613                            struct rocker_wait *wait)
614 {
615         int i;
616         int err;
617
618         for (i = 0; i < 8; i++) {
619                 err = rocker_dma_test_offset(rocker, wait, i);
620                 if (err)
621                         return err;
622         }
623         return 0;
624 }
625
626 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
627 {
628         struct rocker_wait *wait = dev_id;
629
630         rocker_wait_wake_up(wait);
631
632         return IRQ_HANDLED;
633 }
634
635 static int rocker_basic_hw_test(const struct rocker *rocker)
636 {
637         const struct pci_dev *pdev = rocker->pdev;
638         struct rocker_wait wait;
639         int err;
640
641         err = rocker_reg_test(rocker);
642         if (err) {
643                 dev_err(&pdev->dev, "reg test failed\n");
644                 return err;
645         }
646
647         rocker_wait_init(&wait);
648         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
649                           rocker_test_irq_handler, 0,
650                           rocker_driver_name, &wait);
651         if (err) {
652                 dev_err(&pdev->dev, "cannot assign test irq\n");
653                 return err;
654         }
655
656         rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
657
658         if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
659                 dev_err(&pdev->dev, "no interrupt received within the timeout\n");
660                 err = -EIO;
661                 goto free_irq;
662         }
663
664         err = rocker_dma_test(rocker, &wait);
665         if (err)
666                 dev_err(&pdev->dev, "dma test failed\n");
667
668 free_irq:
669         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
670         return err;
671 }
672
673 /******
674  * TLV
675  ******/
676
677 #define ROCKER_TLV_ALIGNTO 8U
678 #define ROCKER_TLV_ALIGN(len) \
679         (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
680 #define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
681
682 /*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
683  * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
684  * |             Header          | Pad |           Payload           | Pad |
685  * |      (struct rocker_tlv)    | ing |                             | ing |
686  * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
687  *  <--------------------------- tlv->len -------------------------->
688  */
689
690 static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
691                                           int *remaining)
692 {
693         int totlen = ROCKER_TLV_ALIGN(tlv->len);
694
695         *remaining -= totlen;
696         return (struct rocker_tlv *) ((char *) tlv + totlen);
697 }
698
699 static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
700 {
701         return remaining >= (int) ROCKER_TLV_HDRLEN &&
702                tlv->len >= ROCKER_TLV_HDRLEN &&
703                tlv->len <= remaining;
704 }
705
706 #define rocker_tlv_for_each(pos, head, len, rem)        \
707         for (pos = head, rem = len;                     \
708              rocker_tlv_ok(pos, rem);                   \
709              pos = rocker_tlv_next(pos, &(rem)))
710
711 #define rocker_tlv_for_each_nested(pos, tlv, rem)       \
712         rocker_tlv_for_each(pos, rocker_tlv_data(tlv),  \
713                             rocker_tlv_len(tlv), rem)
714
715 static int rocker_tlv_attr_size(int payload)
716 {
717         return ROCKER_TLV_HDRLEN + payload;
718 }
719
720 static int rocker_tlv_total_size(int payload)
721 {
722         return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
723 }
724
725 static int rocker_tlv_padlen(int payload)
726 {
727         return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
728 }
729
730 static int rocker_tlv_type(const struct rocker_tlv *tlv)
731 {
732         return tlv->type;
733 }
734
735 static void *rocker_tlv_data(const struct rocker_tlv *tlv)
736 {
737         return (char *) tlv + ROCKER_TLV_HDRLEN;
738 }
739
740 static int rocker_tlv_len(const struct rocker_tlv *tlv)
741 {
742         return tlv->len - ROCKER_TLV_HDRLEN;
743 }
744
745 static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
746 {
747         return *(u8 *) rocker_tlv_data(tlv);
748 }
749
750 static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
751 {
752         return *(u16 *) rocker_tlv_data(tlv);
753 }
754
755 static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
756 {
757         return *(__be16 *) rocker_tlv_data(tlv);
758 }
759
760 static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
761 {
762         return *(u32 *) rocker_tlv_data(tlv);
763 }
764
765 static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
766 {
767         return *(u64 *) rocker_tlv_data(tlv);
768 }
769
770 static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
771                              const char *buf, int buf_len)
772 {
773         const struct rocker_tlv *tlv;
774         const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
775         int rem;
776
777         memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
778
779         rocker_tlv_for_each(tlv, head, buf_len, rem) {
780                 u32 type = rocker_tlv_type(tlv);
781
782                 if (type > 0 && type <= maxtype)
783                         tb[type] = tlv;
784         }
785 }
786
787 static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
788                                     const struct rocker_tlv *tlv)
789 {
790         rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
791                          rocker_tlv_len(tlv));
792 }
793
794 static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
795                                   const struct rocker_desc_info *desc_info)
796 {
797         rocker_tlv_parse(tb, maxtype, desc_info->data,
798                          desc_info->desc->tlv_size);
799 }
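
/* Example (illustrative sketch): parsing attributes out of a completed
 * descriptor, as the event and rx handlers below do.  A missing
 * mandatory attribute is reported as -EIO:
 *
 *	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
 *	u16 type;
 *
 *	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
 *	if (!attrs[ROCKER_TLV_EVENT_TYPE])
 *		return -EIO;
 *	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
 */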
800
801 static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
802 {
803         return (struct rocker_tlv *) ((char *) desc_info->data +
804                                                desc_info->tlv_size);
805 }
806
807 static int rocker_tlv_put(struct rocker_desc_info *desc_info,
808                           int attrtype, int attrlen, const void *data)
809 {
810         int tail_room = desc_info->data_size - desc_info->tlv_size;
811         int total_size = rocker_tlv_total_size(attrlen);
812         struct rocker_tlv *tlv;
813
814         if (unlikely(tail_room < total_size))
815                 return -EMSGSIZE;
816
817         tlv = rocker_tlv_start(desc_info);
818         desc_info->tlv_size += total_size;
819         tlv->type = attrtype;
820         tlv->len = rocker_tlv_attr_size(attrlen);
821         memcpy(rocker_tlv_data(tlv), data, attrlen);
822         memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
823         return 0;
824 }
825
826 static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
827                              int attrtype, u8 value)
828 {
829         return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
830 }
831
832 static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
833                               int attrtype, u16 value)
834 {
835         return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
836 }
837
838 static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
839                                int attrtype, __be16 value)
840 {
841         return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
842 }
843
844 static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
845                               int attrtype, u32 value)
846 {
847         return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
848 }
849
850 static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
851                                int attrtype, __be32 value)
852 {
853         return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
854 }
855
856 static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
857                               int attrtype, u64 value)
858 {
859         return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
860 }
861
862 static struct rocker_tlv *
863 rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
864 {
865         struct rocker_tlv *start = rocker_tlv_start(desc_info);
866
867         if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
868                 return NULL;
869
870         return start;
871 }
872
873 static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
874                                 struct rocker_tlv *start)
875 {
876         start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
877 }
878
879 static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
880                                    const struct rocker_tlv *start)
881 {
882         desc_info->tlv_size = (const char *) start - desc_info->data;
883 }
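
/* Example (illustrative sketch): building a nested attribute; this is
 * the pattern the cmd prep callbacks further below use.  SOME_NEST and
 * SOME_U32 stand in for real attribute types (e.g. ROCKER_TLV_CMD_INFO):
 *
 *	struct rocker_tlv *nest;
 *
 *	nest = rocker_tlv_nest_start(desc_info, SOME_NEST);
 *	if (!nest)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u32(desc_info, SOME_U32, value)) {
 *		rocker_tlv_nest_cancel(desc_info, nest);
 *		return -EMSGSIZE;
 *	}
 *	rocker_tlv_nest_end(desc_info, nest);
 */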
884
885 /******************************************
886  * DMA rings and descriptors manipulations
887  ******************************************/
888
889 static u32 __pos_inc(u32 pos, size_t limit)
890 {
891         return ++pos == limit ? 0 : pos;
892 }
893
894 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
895 {
896         int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
897
898         switch (err) {
899         case ROCKER_OK:
900                 return 0;
901         case -ROCKER_ENOENT:
902                 return -ENOENT;
903         case -ROCKER_ENXIO:
904                 return -ENXIO;
905         case -ROCKER_ENOMEM:
906                 return -ENOMEM;
907         case -ROCKER_EEXIST:
908                 return -EEXIST;
909         case -ROCKER_EINVAL:
910                 return -EINVAL;
911         case -ROCKER_EMSGSIZE:
912                 return -EMSGSIZE;
913         case -ROCKER_ENOTSUP:
914                 return -EOPNOTSUPP;
915         case -ROCKER_ENOBUFS:
916                 return -ENOBUFS;
917         }
918
919         return -EINVAL;
920 }
921
922 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
923 {
924         desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
925 }
926
927 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
928 {
929         u32 comp_err = desc_info->desc->comp_err;
930
931         return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
932 }
933
934 static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
935 {
936         return (void *)(uintptr_t)desc_info->desc->cookie;
937 }
938
939 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
940                                        void *ptr)
941 {
942         desc_info->desc->cookie = (uintptr_t) ptr;
943 }
944
945 static struct rocker_desc_info *
946 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
947 {
948         struct rocker_desc_info *desc_info;
949         u32 head = __pos_inc(info->head, info->size);
950
951         desc_info = &info->desc_info[info->head];
952         if (head == info->tail)
953                 return NULL; /* ring full */
954         desc_info->tlv_size = 0;
955         return desc_info;
956 }
957
958 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
959 {
960         desc_info->desc->buf_size = desc_info->data_size;
961         desc_info->desc->tlv_size = desc_info->tlv_size;
962 }
963
964 static void rocker_desc_head_set(const struct rocker *rocker,
965                                  struct rocker_dma_ring_info *info,
966                                  const struct rocker_desc_info *desc_info)
967 {
968         u32 head = __pos_inc(info->head, info->size);
969
970         BUG_ON(head == info->tail);
971         rocker_desc_commit(desc_info);
972         info->head = head;
973         rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
974 }
975
976 static struct rocker_desc_info *
977 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
978 {
979         struct rocker_desc_info *desc_info;
980
981         if (info->tail == info->head)
982                 return NULL; /* nothing to be done between head and tail */
983         desc_info = &info->desc_info[info->tail];
984         if (!rocker_desc_gen(desc_info))
985                 return NULL; /* gen bit not set, desc is not ready yet */
986         info->tail = __pos_inc(info->tail, info->size);
987         desc_info->tlv_size = desc_info->desc->tlv_size;
988         return desc_info;
989 }
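
/* Example (illustrative sketch): the cmd and event IRQ handlers below
 * drain completed descriptors with rocker_desc_tail_get() and then
 * return credits to the hw:
 *
 *	u32 credits = 0;
 *
 *	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
 *		... handle the completed descriptor ...
 *		credits++;
 *	}
 *	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
 */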
990
991 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
992                                         const struct rocker_dma_ring_info *info,
993                                         u32 credits)
994 {
995         if (credits)
996                 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
997 }
998
999 static unsigned long rocker_dma_ring_size_fix(size_t size)
1000 {
1001         return max(ROCKER_DMA_SIZE_MIN,
1002                    min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
1003 }
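
/* Example: rocker_dma_ring_size_fix(100) rounds the requested size up
 * to the next power of two (128) and clamps the result to the
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX] range, as the BUG_ON() in
 * rocker_dma_ring_create() below expects.
 */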
1004
1005 static int rocker_dma_ring_create(const struct rocker *rocker,
1006                                   unsigned int type,
1007                                   size_t size,
1008                                   struct rocker_dma_ring_info *info)
1009 {
1010         int i;
1011
1012         BUG_ON(size != rocker_dma_ring_size_fix(size));
1013         info->size = size;
1014         info->type = type;
1015         info->head = 0;
1016         info->tail = 0;
1017         info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
1018                                   GFP_KERNEL);
1019         if (!info->desc_info)
1020                 return -ENOMEM;
1021
1022         info->desc = pci_alloc_consistent(rocker->pdev,
1023                                           info->size * sizeof(*info->desc),
1024                                           &info->mapaddr);
1025         if (!info->desc) {
1026                 kfree(info->desc_info);
1027                 return -ENOMEM;
1028         }
1029
1030         for (i = 0; i < info->size; i++)
1031                 info->desc_info[i].desc = &info->desc[i];
1032
1033         rocker_write32(rocker, DMA_DESC_CTRL(info->type),
1034                        ROCKER_DMA_DESC_CTRL_RESET);
1035         rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
1036         rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
1037
1038         return 0;
1039 }
1040
1041 static void rocker_dma_ring_destroy(const struct rocker *rocker,
1042                                     const struct rocker_dma_ring_info *info)
1043 {
1044         rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
1045
1046         pci_free_consistent(rocker->pdev,
1047                             info->size * sizeof(struct rocker_desc),
1048                             info->desc, info->mapaddr);
1049         kfree(info->desc_info);
1050 }
1051
1052 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
1053                                              struct rocker_dma_ring_info *info)
1054 {
1055         int i;
1056
1057         BUG_ON(info->head || info->tail);
1058
1059         /* When the ring is a consumer ring, advance the head for each
1060          * desc; that tells the hw that the desc is ready for it to use.
1061          */
1062         for (i = 0; i < info->size - 1; i++)
1063                 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
1064         rocker_desc_commit(&info->desc_info[i]);
1065 }
1066
1067 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
1068                                       const struct rocker_dma_ring_info *info,
1069                                       int direction, size_t buf_size)
1070 {
1071         struct pci_dev *pdev = rocker->pdev;
1072         int i;
1073         int err;
1074
1075         for (i = 0; i < info->size; i++) {
1076                 struct rocker_desc_info *desc_info = &info->desc_info[i];
1077                 struct rocker_desc *desc = &info->desc[i];
1078                 dma_addr_t dma_handle;
1079                 char *buf;
1080
1081                 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
1082                 if (!buf) {
1083                         err = -ENOMEM;
1084                         goto rollback;
1085                 }
1086
1087                 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
1088                 if (pci_dma_mapping_error(pdev, dma_handle)) {
1089                         kfree(buf);
1090                         err = -EIO;
1091                         goto rollback;
1092                 }
1093
1094                 desc_info->data = buf;
1095                 desc_info->data_size = buf_size;
1096                 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
1097
1098                 desc->buf_addr = dma_handle;
1099                 desc->buf_size = buf_size;
1100         }
1101         return 0;
1102
1103 rollback:
1104         for (i--; i >= 0; i--) {
1105                 const struct rocker_desc_info *desc_info = &info->desc_info[i];
1106
1107                 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1108                                  desc_info->data_size, direction);
1109                 kfree(desc_info->data);
1110         }
1111         return err;
1112 }
1113
1114 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
1115                                       const struct rocker_dma_ring_info *info,
1116                                       int direction)
1117 {
1118         struct pci_dev *pdev = rocker->pdev;
1119         int i;
1120
1121         for (i = 0; i < info->size; i++) {
1122                 const struct rocker_desc_info *desc_info = &info->desc_info[i];
1123                 struct rocker_desc *desc = &info->desc[i];
1124
1125                 desc->buf_addr = 0;
1126                 desc->buf_size = 0;
1127                 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1128                                  desc_info->data_size, direction);
1129                 kfree(desc_info->data);
1130         }
1131 }
1132
1133 static int rocker_dma_rings_init(struct rocker *rocker)
1134 {
1135         const struct pci_dev *pdev = rocker->pdev;
1136         int err;
1137
1138         err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1139                                      ROCKER_DMA_CMD_DEFAULT_SIZE,
1140                                      &rocker->cmd_ring);
1141         if (err) {
1142                 dev_err(&pdev->dev, "failed to create command dma ring\n");
1143                 return err;
1144         }
1145
1146         spin_lock_init(&rocker->cmd_ring_lock);
1147
1148         err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1149                                          PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1150         if (err) {
1151                 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1152                 goto err_dma_cmd_ring_bufs_alloc;
1153         }
1154
1155         err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1156                                      ROCKER_DMA_EVENT_DEFAULT_SIZE,
1157                                      &rocker->event_ring);
1158         if (err) {
1159                 dev_err(&pdev->dev, "failed to create event dma ring\n");
1160                 goto err_dma_event_ring_create;
1161         }
1162
1163         err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1164                                          PCI_DMA_FROMDEVICE, PAGE_SIZE);
1165         if (err) {
1166                 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1167                 goto err_dma_event_ring_bufs_alloc;
1168         }
1169         rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1170         return 0;
1171
1172 err_dma_event_ring_bufs_alloc:
1173         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1174 err_dma_event_ring_create:
1175         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1176                                   PCI_DMA_BIDIRECTIONAL);
1177 err_dma_cmd_ring_bufs_alloc:
1178         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1179         return err;
1180 }
1181
1182 static void rocker_dma_rings_fini(struct rocker *rocker)
1183 {
1184         rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1185                                   PCI_DMA_FROMDEVICE);
1186         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1187         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1188                                   PCI_DMA_BIDIRECTIONAL);
1189         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1190 }
1191
1192 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
1193                                       struct rocker_desc_info *desc_info,
1194                                       struct sk_buff *skb, size_t buf_len)
1195 {
1196         const struct rocker *rocker = rocker_port->rocker;
1197         struct pci_dev *pdev = rocker->pdev;
1198         dma_addr_t dma_handle;
1199
1200         dma_handle = pci_map_single(pdev, skb->data, buf_len,
1201                                     PCI_DMA_FROMDEVICE);
1202         if (pci_dma_mapping_error(pdev, dma_handle))
1203                 return -EIO;
1204         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1205                 goto tlv_put_failure;
1206         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1207                 goto tlv_put_failure;
1208         return 0;
1209
1210 tlv_put_failure:
1211         pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1212         desc_info->tlv_size = 0;
1213         return -EMSGSIZE;
1214 }
1215
1216 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
1217 {
1218         return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1219 }
1220
1221 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
1222                                         struct rocker_desc_info *desc_info)
1223 {
1224         struct net_device *dev = rocker_port->dev;
1225         struct sk_buff *skb;
1226         size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1227         int err;
1228
1229         /* Ensure that hw will see tlv_size zero in case of an error.
1230          * That tells hw to use another descriptor.
1231          */
1232         rocker_desc_cookie_ptr_set(desc_info, NULL);
1233         desc_info->tlv_size = 0;
1234
1235         skb = netdev_alloc_skb_ip_align(dev, buf_len);
1236         if (!skb)
1237                 return -ENOMEM;
1238         err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
1239         if (err) {
1240                 dev_kfree_skb_any(skb);
1241                 return err;
1242         }
1243         rocker_desc_cookie_ptr_set(desc_info, skb);
1244         return 0;
1245 }
1246
1247 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1248                                          const struct rocker_tlv **attrs)
1249 {
1250         struct pci_dev *pdev = rocker->pdev;
1251         dma_addr_t dma_handle;
1252         size_t len;
1253
1254         if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1255             !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1256                 return;
1257         dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1258         len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1259         pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1260 }
1261
1262 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1263                                         const struct rocker_desc_info *desc_info)
1264 {
1265         const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1266         struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1267
1268         if (!skb)
1269                 return;
1270         rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1271         rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1272         dev_kfree_skb_any(skb);
1273 }
1274
1275 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
1276 {
1277         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1278         const struct rocker *rocker = rocker_port->rocker;
1279         int i;
1280         int err;
1281
1282         for (i = 0; i < rx_ring->size; i++) {
1283                 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
1284                                                    &rx_ring->desc_info[i]);
1285                 if (err)
1286                         goto rollback;
1287         }
1288         return 0;
1289
1290 rollback:
1291         for (i--; i >= 0; i--)
1292                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1293         return err;
1294 }
1295
1296 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1297 {
1298         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1299         const struct rocker *rocker = rocker_port->rocker;
1300         int i;
1301
1302         for (i = 0; i < rx_ring->size; i++)
1303                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1304 }
1305
1306 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1307 {
1308         struct rocker *rocker = rocker_port->rocker;
1309         int err;
1310
1311         err = rocker_dma_ring_create(rocker,
1312                                      ROCKER_DMA_TX(rocker_port->port_number),
1313                                      ROCKER_DMA_TX_DEFAULT_SIZE,
1314                                      &rocker_port->tx_ring);
1315         if (err) {
1316                 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1317                 return err;
1318         }
1319
1320         err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1321                                          PCI_DMA_TODEVICE,
1322                                          ROCKER_DMA_TX_DESC_SIZE);
1323         if (err) {
1324                 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1325                 goto err_dma_tx_ring_bufs_alloc;
1326         }
1327
1328         err = rocker_dma_ring_create(rocker,
1329                                      ROCKER_DMA_RX(rocker_port->port_number),
1330                                      ROCKER_DMA_RX_DEFAULT_SIZE,
1331                                      &rocker_port->rx_ring);
1332         if (err) {
1333                 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1334                 goto err_dma_rx_ring_create;
1335         }
1336
1337         err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1338                                          PCI_DMA_BIDIRECTIONAL,
1339                                          ROCKER_DMA_RX_DESC_SIZE);
1340         if (err) {
1341                 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1342                 goto err_dma_rx_ring_bufs_alloc;
1343         }
1344
1345         err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1346         if (err) {
1347                 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1348                 goto err_dma_rx_ring_skbs_alloc;
1349         }
1350         rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1351
1352         return 0;
1353
1354 err_dma_rx_ring_skbs_alloc:
1355         rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1356                                   PCI_DMA_BIDIRECTIONAL);
1357 err_dma_rx_ring_bufs_alloc:
1358         rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1359 err_dma_rx_ring_create:
1360         rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1361                                   PCI_DMA_TODEVICE);
1362 err_dma_tx_ring_bufs_alloc:
1363         rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1364         return err;
1365 }
1366
1367 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1368 {
1369         struct rocker *rocker = rocker_port->rocker;
1370
1371         rocker_dma_rx_ring_skbs_free(rocker_port);
1372         rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1373                                   PCI_DMA_BIDIRECTIONAL);
1374         rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1375         rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1376                                   PCI_DMA_TODEVICE);
1377         rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1378 }
1379
1380 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1381                                    bool enable)
1382 {
1383         u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1384
1385         if (enable)
1386                 val |= 1ULL << rocker_port->pport;
1387         else
1388                 val &= ~(1ULL << rocker_port->pport);
1389         rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1390 }
1391
1392 /********************************
1393  * Interrupt handler and helpers
1394  ********************************/
1395
1396 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1397 {
1398         struct rocker *rocker = dev_id;
1399         const struct rocker_desc_info *desc_info;
1400         struct rocker_wait *wait;
1401         u32 credits = 0;
1402
1403         spin_lock(&rocker->cmd_ring_lock);
1404         while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1405                 wait = rocker_desc_cookie_ptr_get(desc_info);
1406                 if (wait->nowait) {
1407                         rocker_desc_gen_clear(desc_info);
1408                         rocker_wait_destroy(NULL, wait);
1409                 } else {
1410                         rocker_wait_wake_up(wait);
1411                 }
1412                 credits++;
1413         }
1414         spin_unlock(&rocker->cmd_ring_lock);
1415         rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1416
1417         return IRQ_HANDLED;
1418 }
1419
1420 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1421 {
1422         netif_carrier_on(rocker_port->dev);
1423         netdev_info(rocker_port->dev, "Link is up\n");
1424 }
1425
1426 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1427 {
1428         netif_carrier_off(rocker_port->dev);
1429         netdev_info(rocker_port->dev, "Link is down\n");
1430 }
1431
1432 static int rocker_event_link_change(const struct rocker *rocker,
1433                                     const struct rocker_tlv *info)
1434 {
1435         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1436         unsigned int port_number;
1437         bool link_up;
1438         struct rocker_port *rocker_port;
1439
1440         rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1441         if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1442             !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1443                 return -EIO;
1444         port_number =
1445                 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1446         link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1447
1448         if (port_number >= rocker->port_count)
1449                 return -EINVAL;
1450
1451         rocker_port = rocker->ports[port_number];
1452         if (netif_carrier_ok(rocker_port->dev) != link_up) {
1453                 if (link_up)
1454                         rocker_port_link_up(rocker_port);
1455                 else
1456                         rocker_port_link_down(rocker_port);
1457         }
1458
1459         return 0;
1460 }
1461
1462 static int rocker_port_fdb(struct rocker_port *rocker_port,
1463                            struct switchdev_trans *trans,
1464                            const unsigned char *addr,
1465                            __be16 vlan_id, int flags);
1466
1467 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
1468                                       const struct rocker_tlv *info)
1469 {
1470         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1471         unsigned int port_number;
1472         struct rocker_port *rocker_port;
1473         const unsigned char *addr;
1474         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1475         __be16 vlan_id;
1476
1477         rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1478         if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1479             !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1480             !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1481                 return -EIO;
1482         port_number =
1483                 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1484         addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1485         vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1486
1487         if (port_number >= rocker->port_count)
1488                 return -EINVAL;
1489
1490         rocker_port = rocker->ports[port_number];
1491
1492         if (rocker_port->stp_state != BR_STATE_LEARNING &&
1493             rocker_port->stp_state != BR_STATE_FORWARDING)
1494                 return 0;
1495
1496         return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
1497 }
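/* rocker_event_mac_vlan_seen() runs in hard-irq context via
 * rocker_event_irq_handler(), hence ROCKER_OP_FLAG_NOWAIT: the
 * resulting FDB command is posted to the device without sleeping on
 * its completion.  Learned addresses are only accepted while the port
 * is in the STP learning or forwarding state.
 */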
1498
1499 static int rocker_event_process(const struct rocker *rocker,
1500                                 const struct rocker_desc_info *desc_info)
1501 {
1502         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1503         const struct rocker_tlv *info;
1504         u16 type;
1505
1506         rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1507         if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1508             !attrs[ROCKER_TLV_EVENT_INFO])
1509                 return -EIO;
1510
1511         type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1512         info = attrs[ROCKER_TLV_EVENT_INFO];
1513
1514         switch (type) {
1515         case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1516                 return rocker_event_link_change(rocker, info);
1517         case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1518                 return rocker_event_mac_vlan_seen(rocker, info);
1519         }
1520
1521         return -EOPNOTSUPP;
1522 }
1523
1524 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1525 {
1526         struct rocker *rocker = dev_id;
1527         const struct pci_dev *pdev = rocker->pdev;
1528         const struct rocker_desc_info *desc_info;
1529         u32 credits = 0;
1530         int err;
1531
1532         while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1533                 err = rocker_desc_err(desc_info);
1534                 if (err) {
1535                         dev_err(&pdev->dev, "event desc received with err %d\n",
1536                                 err);
1537                 } else {
1538                         err = rocker_event_process(rocker, desc_info);
1539                         if (err)
1540                                 dev_err(&pdev->dev, "event processing failed with err %d\n",
1541                                         err);
1542                 }
1543                 rocker_desc_gen_clear(desc_info);
1544                 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1545                 credits++;
1546         }
1547         rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1548
1549         return IRQ_HANDLED;
1550 }
1551
1552 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1553 {
1554         struct rocker_port *rocker_port = dev_id;
1555
1556         napi_schedule(&rocker_port->napi_tx);
1557         return IRQ_HANDLED;
1558 }
1559
1560 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1561 {
1562         struct rocker_port *rocker_port = dev_id;
1563
1564         napi_schedule(&rocker_port->napi_rx);
1565         return IRQ_HANDLED;
1566 }
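/* Interrupt layout: the cmd and event rings are serviced directly in
 * their rocker-level handlers above, while the per-port tx and rx
 * vectors merely kick NAPI; all real transmit/receive work happens in
 * the NAPI poll functions.
 */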
1567
1568 /********************
1569  * Command interface
1570  ********************/
1571
1572 typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
1573                                     struct rocker_desc_info *desc_info,
1574                                     void *priv);
1575
1576 typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
1577                                     const struct rocker_desc_info *desc_info,
1578                                     void *priv);
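/* Every command is driven by a pair of callbacks: a "prep" callback
 * fills the DMA descriptor with request TLVs, and an optional "proc"
 * callback parses the response TLVs once the device has completed the
 * descriptor.  rocker_cmd_exec() below runs this round trip.
 */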
1579
1580 static int rocker_cmd_exec(struct rocker_port *rocker_port,
1581                            struct switchdev_trans *trans, int flags,
1582                            rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1583                            rocker_cmd_proc_cb_t process, void *process_priv)
1584 {
1585         struct rocker *rocker = rocker_port->rocker;
1586         struct rocker_desc_info *desc_info;
1587         struct rocker_wait *wait;
1588         bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1589         unsigned long lock_flags;
1590         int err;
1591
1592         wait = rocker_wait_create(rocker_port, trans, flags);
1593         if (!wait)
1594                 return -ENOMEM;
1595         wait->nowait = nowait;
1596
1597         spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
1598
1599         desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1600         if (!desc_info) {
1601                 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1602                 err = -EAGAIN;
1603                 goto out;
1604         }
1605
1606         err = prepare(rocker_port, desc_info, prepare_priv);
1607         if (err) {
1608                 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1609                 goto out;
1610         }
1611
1612         rocker_desc_cookie_ptr_set(desc_info, wait);
1613
1614         if (!switchdev_trans_ph_prepare(trans))
1615                 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1616
1617         spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1618
1619         if (nowait)
1620                 return 0;
1621
1622         if (!switchdev_trans_ph_prepare(trans))
1623                 if (!rocker_wait_event_timeout(wait, HZ / 10))
1624                         return -EIO;
1625
1626         err = rocker_desc_err(desc_info);
1627         if (err)
1628                 return err;
1629
1630         if (process)
1631                 err = process(rocker_port, desc_info, process_priv);
1632
1633         rocker_desc_gen_clear(desc_info);
1634 out:
1635         rocker_wait_destroy(trans, wait);
1636         return err;
1637 }
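/* Illustrative call, mirroring the wrappers further down (a sketch of
 * existing usage, not an additional API):
 *
 *	err = rocker_cmd_exec(rocker_port, NULL, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 *
 * In the switchdev prepare phase the descriptor is built but never
 * posted, so the call only proves the command can be constructed.
 * Note that the timeout path above returns without destroying the
 * wait object; freeing it there would race with a late completion
 * interrupt that still holds the descriptor cookie, so leaking it in
 * that rare error case appears to be the safer trade-off.
 */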
1638
1639 static int
1640 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1641                                   struct rocker_desc_info *desc_info,
1642                                   void *priv)
1643 {
1644         struct rocker_tlv *cmd_info;
1645
1646         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1647                                ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1648                 return -EMSGSIZE;
1649         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1650         if (!cmd_info)
1651                 return -EMSGSIZE;
1652         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1653                                rocker_port->pport))
1654                 return -EMSGSIZE;
1655         rocker_tlv_nest_end(desc_info, cmd_info);
1656         return 0;
1657 }
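/* All prep callbacks follow the TLV idiom seen above:
 * rocker_tlv_nest_start() opens a ROCKER_TLV_CMD_INFO container,
 * attributes are appended with the rocker_tlv_put_*() helpers (which
 * fail when the descriptor buffer is too small, hence the -EMSGSIZE
 * returns), and rocker_tlv_nest_end() patches up the container
 * length.
 */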
1658
1659 static int
1660 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1661                                           const struct rocker_desc_info *desc_info,
1662                                           void *priv)
1663 {
1664         struct ethtool_cmd *ecmd = priv;
1665         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1666         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1667         u32 speed;
1668         u8 duplex;
1669         u8 autoneg;
1670
1671         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1672         if (!attrs[ROCKER_TLV_CMD_INFO])
1673                 return -EIO;
1674
1675         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1676                                 attrs[ROCKER_TLV_CMD_INFO]);
1677         if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1678             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1679             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1680                 return -EIO;
1681
1682         speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1683         duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1684         autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1685
1686         ecmd->transceiver = XCVR_INTERNAL;
1687         ecmd->supported = SUPPORTED_TP;
1688         ecmd->phy_address = 0xff;
1689         ecmd->port = PORT_TP;
1690         ethtool_cmd_speed_set(ecmd, speed);
1691         ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1692         ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1693
1694         return 0;
1695 }
1696
1697 static int
1698 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1699                                           const struct rocker_desc_info *desc_info,
1700                                           void *priv)
1701 {
1702         unsigned char *macaddr = priv;
1703         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1704         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1705         const struct rocker_tlv *attr;
1706
1707         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1708         if (!attrs[ROCKER_TLV_CMD_INFO])
1709                 return -EIO;
1710
1711         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1712                                 attrs[ROCKER_TLV_CMD_INFO]);
1713         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1714         if (!attr)
1715                 return -EIO;
1716
1717         if (rocker_tlv_len(attr) != ETH_ALEN)
1718                 return -EINVAL;
1719
1720         ether_addr_copy(macaddr, rocker_tlv_data(attr));
1721         return 0;
1722 }
1723
1724 struct port_name {
1725         char *buf;
1726         size_t len;
1727 };
1728
1729 static int
1730 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1731                                             const struct rocker_desc_info *desc_info,
1732                                             void *priv)
1733 {
1734         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1735         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1736         struct port_name *name = priv;
1737         const struct rocker_tlv *attr;
1738         size_t i, j, len;
1739         const char *str;
1740
1741         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1742         if (!attrs[ROCKER_TLV_CMD_INFO])
1743                 return -EIO;
1744
1745         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1746                                 attrs[ROCKER_TLV_CMD_INFO]);
1747         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1748         if (!attr)
1749                 return -EIO;
1750
1751         len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
1752         str = rocker_tlv_data(attr);
1753
1754         /* copy only alphanumeric characters, leaving room for the NUL */
1755         for (i = j = 0; i < len; ++i) {
1756                 if (isalnum(str[i])) {
1757                         name->buf[j] = str[i];
1758                         j++;
1759                 }
1760         }
1761
1762         if (j == 0)
1763                 return -EIO;
1764
1765         name->buf[j] = '\0';
1766
1767         return 0;
1768 }
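/* The name buffer is caller-provided (struct port_name above); the
 * min_t() clamp reserves the final byte so the NUL terminator written
 * after the copy loop cannot land past the end of name->buf.
 */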
1769
1770 static int
1771 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1772                                           struct rocker_desc_info *desc_info,
1773                                           void *priv)
1774 {
1775         struct ethtool_cmd *ecmd = priv;
1776         struct rocker_tlv *cmd_info;
1777
1778         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1779                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1780                 return -EMSGSIZE;
1781         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1782         if (!cmd_info)
1783                 return -EMSGSIZE;
1784         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1785                                rocker_port->pport))
1786                 return -EMSGSIZE;
1787         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1788                                ethtool_cmd_speed(ecmd)))
1789                 return -EMSGSIZE;
1790         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1791                               ecmd->duplex))
1792                 return -EMSGSIZE;
1793         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1794                               ecmd->autoneg))
1795                 return -EMSGSIZE;
1796         rocker_tlv_nest_end(desc_info, cmd_info);
1797         return 0;
1798 }
1799
1800 static int
1801 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1802                                           struct rocker_desc_info *desc_info,
1803                                           void *priv)
1804 {
1805         const unsigned char *macaddr = priv;
1806         struct rocker_tlv *cmd_info;
1807
1808         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1809                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1810                 return -EMSGSIZE;
1811         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1812         if (!cmd_info)
1813                 return -EMSGSIZE;
1814         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1815                                rocker_port->pport))
1816                 return -EMSGSIZE;
1817         if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1818                            ETH_ALEN, macaddr))
1819                 return -EMSGSIZE;
1820         rocker_tlv_nest_end(desc_info, cmd_info);
1821         return 0;
1822 }
1823
1824 static int
1825 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1826                                       struct rocker_desc_info *desc_info,
1827                                       void *priv)
1828 {
1829         int mtu = *(int *)priv;
1830         struct rocker_tlv *cmd_info;
1831
1832         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1833                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1834                 return -EMSGSIZE;
1835         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1836         if (!cmd_info)
1837                 return -EMSGSIZE;
1838         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1839                                rocker_port->pport))
1840                 return -EMSGSIZE;
1841         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1842                                mtu))
1843                 return -EMSGSIZE;
1844         rocker_tlv_nest_end(desc_info, cmd_info);
1845         return 0;
1846 }
1847
1848 static int
1849 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1850                                   struct rocker_desc_info *desc_info,
1851                                   void *priv)
1852 {
1853         struct rocker_tlv *cmd_info;
1854
1855         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1856                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1857                 return -EMSGSIZE;
1858         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1859         if (!cmd_info)
1860                 return -EMSGSIZE;
1861         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1862                                rocker_port->pport))
1863                 return -EMSGSIZE;
1864         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1865                               !!(rocker_port->brport_flags & BR_LEARNING)))
1866                 return -EMSGSIZE;
1867         rocker_tlv_nest_end(desc_info, cmd_info);
1868         return 0;
1869 }
1870
1871 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1872                                                 struct ethtool_cmd *ecmd)
1873 {
1874         return rocker_cmd_exec(rocker_port, NULL, 0,
1875                                rocker_cmd_get_port_settings_prep, NULL,
1876                                rocker_cmd_get_port_settings_ethtool_proc,
1877                                ecmd);
1878 }
1879
1880 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1881                                                 unsigned char *macaddr)
1882 {
1883         return rocker_cmd_exec(rocker_port, NULL, 0,
1884                                rocker_cmd_get_port_settings_prep, NULL,
1885                                rocker_cmd_get_port_settings_macaddr_proc,
1886                                macaddr);
1887 }
1888
1889 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1890                                                 struct ethtool_cmd *ecmd)
1891 {
1892         return rocker_cmd_exec(rocker_port, NULL, 0,
1893                                rocker_cmd_set_port_settings_ethtool_prep,
1894                                ecmd, NULL, NULL);
1895 }
1896
1897 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1898                                                 unsigned char *macaddr)
1899 {
1900         return rocker_cmd_exec(rocker_port, NULL, 0,
1901                                rocker_cmd_set_port_settings_macaddr_prep,
1902                                macaddr, NULL, NULL);
1903 }
1904
1905 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1906                                             int mtu)
1907 {
1908         return rocker_cmd_exec(rocker_port, NULL, 0,
1909                                rocker_cmd_set_port_settings_mtu_prep,
1910                                &mtu, NULL, NULL);
1911 }
1912
1913 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1914                                     struct switchdev_trans *trans)
1915 {
1916         return rocker_cmd_exec(rocker_port, trans, 0,
1917                                rocker_cmd_set_port_learning_prep,
1918                                NULL, NULL, NULL);
1919 }
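/* The wrappers above bind rocker_cmd_exec() to one prep (and, for the
 * getters, one proc) callback each; the rest of the driver goes
 * through these helpers rather than touching the command ring
 * directly.  For example, the MTU change path elsewhere in this file
 * ends up in rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu).
 */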
1920
1921 static int
1922 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1923                                 const struct rocker_flow_tbl_entry *entry)
1924 {
1925         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1926                                entry->key.ig_port.in_pport))
1927                 return -EMSGSIZE;
1928         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1929                                entry->key.ig_port.in_pport_mask))
1930                 return -EMSGSIZE;
1931         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1932                                entry->key.ig_port.goto_tbl))
1933                 return -EMSGSIZE;
1934
1935         return 0;
1936 }
1937
1938 static int
1939 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1940                              const struct rocker_flow_tbl_entry *entry)
1941 {
1942         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1943                                entry->key.vlan.in_pport))
1944                 return -EMSGSIZE;
1945         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1946                                 entry->key.vlan.vlan_id))
1947                 return -EMSGSIZE;
1948         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1949                                 entry->key.vlan.vlan_id_mask))
1950                 return -EMSGSIZE;
1951         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1952                                entry->key.vlan.goto_tbl))
1953                 return -EMSGSIZE;
1954         if (entry->key.vlan.untagged &&
1955             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1956                                 entry->key.vlan.new_vlan_id))
1957                 return -EMSGSIZE;
1958
1959         return 0;
1960 }
1961
1962 static int
1963 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1964                                  const struct rocker_flow_tbl_entry *entry)
1965 {
1966         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1967                                entry->key.term_mac.in_pport))
1968                 return -EMSGSIZE;
1969         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1970                                entry->key.term_mac.in_pport_mask))
1971                 return -EMSGSIZE;
1972         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1973                                 entry->key.term_mac.eth_type))
1974                 return -EMSGSIZE;
1975         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1976                            ETH_ALEN, entry->key.term_mac.eth_dst))
1977                 return -EMSGSIZE;
1978         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1979                            ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1980                 return -EMSGSIZE;
1981         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1982                                 entry->key.term_mac.vlan_id))
1983                 return -EMSGSIZE;
1984         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1985                                 entry->key.term_mac.vlan_id_mask))
1986                 return -EMSGSIZE;
1987         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1988                                entry->key.term_mac.goto_tbl))
1989                 return -EMSGSIZE;
1990         if (entry->key.term_mac.copy_to_cpu &&
1991             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1992                               entry->key.term_mac.copy_to_cpu))
1993                 return -EMSGSIZE;
1994
1995         return 0;
1996 }
1997
1998 static int
1999 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
2000                                       const struct rocker_flow_tbl_entry *entry)
2001 {
2002         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2003                                 entry->key.ucast_routing.eth_type))
2004                 return -EMSGSIZE;
2005         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2006                                 entry->key.ucast_routing.dst4))
2007                 return -EMSGSIZE;
2008         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2009                                 entry->key.ucast_routing.dst4_mask))
2010                 return -EMSGSIZE;
2011         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2012                                entry->key.ucast_routing.goto_tbl))
2013                 return -EMSGSIZE;
2014         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2015                                entry->key.ucast_routing.group_id))
2016                 return -EMSGSIZE;
2017
2018         return 0;
2019 }
2020
2021 static int
2022 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2023                                const struct rocker_flow_tbl_entry *entry)
2024 {
2025         if (entry->key.bridge.has_eth_dst &&
2026             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2027                            ETH_ALEN, entry->key.bridge.eth_dst))
2028                 return -EMSGSIZE;
2029         if (entry->key.bridge.has_eth_dst_mask &&
2030             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2031                            ETH_ALEN, entry->key.bridge.eth_dst_mask))
2032                 return -EMSGSIZE;
2033         if (entry->key.bridge.vlan_id &&
2034             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2035                                 entry->key.bridge.vlan_id))
2036                 return -EMSGSIZE;
2037         if (entry->key.bridge.tunnel_id &&
2038             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2039                                entry->key.bridge.tunnel_id))
2040                 return -EMSGSIZE;
2041         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2042                                entry->key.bridge.goto_tbl))
2043                 return -EMSGSIZE;
2044         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2045                                entry->key.bridge.group_id))
2046                 return -EMSGSIZE;
2047         if (entry->key.bridge.copy_to_cpu &&
2048             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2049                               entry->key.bridge.copy_to_cpu))
2050                 return -EMSGSIZE;
2051
2052         return 0;
2053 }
2054
2055 static int
2056 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2057                             const struct rocker_flow_tbl_entry *entry)
2058 {
2059         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2060                                entry->key.acl.in_pport))
2061                 return -EMSGSIZE;
2062         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2063                                entry->key.acl.in_pport_mask))
2064                 return -EMSGSIZE;
2065         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2066                            ETH_ALEN, entry->key.acl.eth_src))
2067                 return -EMSGSIZE;
2068         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2069                            ETH_ALEN, entry->key.acl.eth_src_mask))
2070                 return -EMSGSIZE;
2071         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2072                            ETH_ALEN, entry->key.acl.eth_dst))
2073                 return -EMSGSIZE;
2074         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2075                            ETH_ALEN, entry->key.acl.eth_dst_mask))
2076                 return -EMSGSIZE;
2077         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2078                                 entry->key.acl.eth_type))
2079                 return -EMSGSIZE;
2080         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2081                                 entry->key.acl.vlan_id))
2082                 return -EMSGSIZE;
2083         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2084                                 entry->key.acl.vlan_id_mask))
2085                 return -EMSGSIZE;
2086
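        /* The device takes DSCP and ECN as separate attributes.  Note
         * that the split below keeps the low six bits of ip_tos as
         * DSCP and the top two bits as ECN, the inverse of the usual
         * RFC 3168 byte layout, so callers must encode ip_tos
         * accordingly.
         */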
2087         switch (ntohs(entry->key.acl.eth_type)) {
2088         case ETH_P_IP:
2089         case ETH_P_IPV6:
2090                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2091                                       entry->key.acl.ip_proto))
2092                         return -EMSGSIZE;
2093                 if (rocker_tlv_put_u8(desc_info,
2094                                       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2095                                       entry->key.acl.ip_proto_mask))
2096                         return -EMSGSIZE;
2097                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2098                                       entry->key.acl.ip_tos & 0x3f))
2099                         return -EMSGSIZE;
2100                 if (rocker_tlv_put_u8(desc_info,
2101                                       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2102                                       entry->key.acl.ip_tos_mask & 0x3f))
2103                         return -EMSGSIZE;
2104                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2105                                       (entry->key.acl.ip_tos & 0xc0) >> 6))
2106                         return -EMSGSIZE;
2107                 if (rocker_tlv_put_u8(desc_info,
2108                                       ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2109                                       (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2110                         return -EMSGSIZE;
2111                 break;
2112         }
2113
2114         if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2115             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2116                                entry->key.acl.group_id))
2117                 return -EMSGSIZE;
2118
2119         return 0;
2120 }
2121
2122 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2123                                    struct rocker_desc_info *desc_info,
2124                                    void *priv)
2125 {
2126         const struct rocker_flow_tbl_entry *entry = priv;
2127         struct rocker_tlv *cmd_info;
2128         int err = 0;
2129
2130         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2131                 return -EMSGSIZE;
2132         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2133         if (!cmd_info)
2134                 return -EMSGSIZE;
2135         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2136                                entry->key.tbl_id))
2137                 return -EMSGSIZE;
2138         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2139                                entry->key.priority))
2140                 return -EMSGSIZE;
2141         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2142                 return -EMSGSIZE;
2143         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2144                                entry->cookie))
2145                 return -EMSGSIZE;
2146
2147         switch (entry->key.tbl_id) {
2148         case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2149                 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2150                 break;
2151         case ROCKER_OF_DPA_TABLE_ID_VLAN:
2152                 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2153                 break;
2154         case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2155                 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2156                 break;
2157         case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2158                 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2159                 break;
2160         case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2161                 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2162                 break;
2163         case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2164                 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2165                 break;
2166         default:
2167                 err = -EOPNOTSUPP;
2168                 break;
2169         }
2170
2171         if (err)
2172                 return err;
2173
2174         rocker_tlv_nest_end(desc_info, cmd_info);
2175
2176         return 0;
2177 }
2178
2179 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2180                                    struct rocker_desc_info *desc_info,
2181                                    void *priv)
2182 {
2183         const struct rocker_flow_tbl_entry *entry = priv;
2184         struct rocker_tlv *cmd_info;
2185
2186         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2187                 return -EMSGSIZE;
2188         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2189         if (!cmd_info)
2190                 return -EMSGSIZE;
2191         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2192                                entry->cookie))
2193                 return -EMSGSIZE;
2194         rocker_tlv_nest_end(desc_info, cmd_info);
2195
2196         return 0;
2197 }
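/* Deleting a flow needs only the 64-bit cookie assigned when the
 * entry was added; the full match key is not re-sent.  The add path,
 * by contrast, dispatches on key.tbl_id to serialize the
 * table-specific part of the key.
 */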
2198
2199 static int
2200 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2201                                       const struct rocker_group_tbl_entry *entry)
2202 {
2203         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2204                                ROCKER_GROUP_PORT_GET(entry->group_id)))
2205                 return -EMSGSIZE;
2206         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2207                               entry->l2_interface.pop_vlan))
2208                 return -EMSGSIZE;
2209
2210         return 0;
2211 }
2212
2213 static int
2214 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2215                                     const struct rocker_group_tbl_entry *entry)
2216 {
2217         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2218                                entry->l2_rewrite.group_id))
2219                 return -EMSGSIZE;
2220         if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2221             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2222                            ETH_ALEN, entry->l2_rewrite.eth_src))
2223                 return -EMSGSIZE;
2224         if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2225             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2226                            ETH_ALEN, entry->l2_rewrite.eth_dst))
2227                 return -EMSGSIZE;
2228         if (entry->l2_rewrite.vlan_id &&
2229             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2230                                 entry->l2_rewrite.vlan_id))
2231                 return -EMSGSIZE;
2232
2233         return 0;
2234 }
2235
2236 static int
2237 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2238                                    const struct rocker_group_tbl_entry *entry)
2239 {
2240         int i;
2241         struct rocker_tlv *group_ids;
2242
2243         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2244                                entry->group_count))
2245                 return -EMSGSIZE;
2246
2247         group_ids = rocker_tlv_nest_start(desc_info,
2248                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
2249         if (!group_ids)
2250                 return -EMSGSIZE;
2251
2252         for (i = 0; i < entry->group_count; i++)
2253                 /* Note TLV array is 1-based */
2254                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2255                         return -EMSGSIZE;
2256
2257         rocker_tlv_nest_end(desc_info, group_ids);
2258
2259         return 0;
2260 }
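/* The resulting nest is, schematically (a sketch of the TLV layout,
 * not the on-wire byte format):
 *
 *	GROUP_COUNT = N
 *	GROUP_IDS {
 *		[1] = group_ids[0]
 *		[2] = group_ids[1]
 *		...
 *	}
 */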
2261
2262 static int
2263 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2264                                     const struct rocker_group_tbl_entry *entry)
2265 {
2266         if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2267             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2268                            ETH_ALEN, entry->l3_unicast.eth_src))
2269                 return -EMSGSIZE;
2270         if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2271             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2272                            ETH_ALEN, entry->l3_unicast.eth_dst))
2273                 return -EMSGSIZE;
2274         if (entry->l3_unicast.vlan_id &&
2275             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2276                                 entry->l3_unicast.vlan_id))
2277                 return -EMSGSIZE;
2278         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2279                               entry->l3_unicast.ttl_check))
2280                 return -EMSGSIZE;
2281         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2282                                entry->l3_unicast.group_id))
2283                 return -EMSGSIZE;
2284
2285         return 0;
2286 }
2287
2288 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2289                                     struct rocker_desc_info *desc_info,
2290                                     void *priv)
2291 {
2292         struct rocker_group_tbl_entry *entry = priv;
2293         struct rocker_tlv *cmd_info;
2294         int err = 0;
2295
2296         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2297                 return -EMSGSIZE;
2298         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2299         if (!cmd_info)
2300                 return -EMSGSIZE;
2301
2302         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2303                                entry->group_id))
2304                 return -EMSGSIZE;
2305
2306         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2307         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2308                 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2309                 break;
2310         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2311                 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2312                 break;
2313         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2314         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2315                 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2316                 break;
2317         case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2318                 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2319                 break;
2320         default:
2321                 err = -EOPNOTSUPP;
2322                 break;
2323         }
2324
2325         if (err)
2326                 return err;
2327
2328         rocker_tlv_nest_end(desc_info, cmd_info);
2329
2330         return 0;
2331 }
2332
2333 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2334                                     struct rocker_desc_info *desc_info,
2335                                     void *priv)
2336 {
2337         const struct rocker_group_tbl_entry *entry = priv;
2338         struct rocker_tlv *cmd_info;
2339
2340         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2341                 return -EMSGSIZE;
2342         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2343         if (!cmd_info)
2344                 return -EMSGSIZE;
2345         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2346                                entry->group_id))
2347                 return -EMSGSIZE;
2348         rocker_tlv_nest_end(desc_info, cmd_info);
2349
2350         return 0;
2351 }
2352
2353 /***************************************************
2354  * Flow, group, FDB, internal VLAN and neigh tables
2355  ***************************************************/
2356
2357 static int rocker_init_tbls(struct rocker *rocker)
2358 {
2359         hash_init(rocker->flow_tbl);
2360         spin_lock_init(&rocker->flow_tbl_lock);
2361
2362         hash_init(rocker->group_tbl);
2363         spin_lock_init(&rocker->group_tbl_lock);
2364
2365         hash_init(rocker->fdb_tbl);
2366         spin_lock_init(&rocker->fdb_tbl_lock);
2367
2368         hash_init(rocker->internal_vlan_tbl);
2369         spin_lock_init(&rocker->internal_vlan_tbl_lock);
2370
2371         hash_init(rocker->neigh_tbl);
2372         spin_lock_init(&rocker->neigh_tbl_lock);
2373
2374         return 0;
2375 }
2376
2377 static void rocker_free_tbls(struct rocker *rocker)
2378 {
2379         unsigned long flags;
2380         struct rocker_flow_tbl_entry *flow_entry;
2381         struct rocker_group_tbl_entry *group_entry;
2382         struct rocker_fdb_tbl_entry *fdb_entry;
2383         struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2384         struct rocker_neigh_tbl_entry *neigh_entry;
2385         struct hlist_node *tmp;
2386         int bkt;
2387
2388         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2389         hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2390                 hash_del(&flow_entry->entry);
2391         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2392
2393         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2394         hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2395                 hash_del(&group_entry->entry);
2396         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2397
2398         spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2399         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2400                 hash_del(&fdb_entry->entry);
2401         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2402
2403         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2404         hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2405                            tmp, internal_vlan_entry, entry)
2406                 hash_del(&internal_vlan_entry->entry);
2407         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2408
2409         spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2410         hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2411                 hash_del(&neigh_entry->entry);
2412         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2413 }
2414
2415 static struct rocker_flow_tbl_entry *
2416 rocker_flow_tbl_find(const struct rocker *rocker,
2417                      const struct rocker_flow_tbl_entry *match)
2418 {
2419         struct rocker_flow_tbl_entry *found;
2420         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2421
2422         hash_for_each_possible(rocker->flow_tbl, found,
2423                                entry, match->key_crc32) {
2424                 if (memcmp(&found->key, &match->key, key_len) == 0)
2425                         return found;
2426         }
2427
2428         return NULL;
2429 }
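/* Flow entries are hashed by the crc32 of their key and compared with
 * memcmp() over key_len bytes, so two keys only match when every byte
 * is equal; entries wanting a shorter effective key set key_len
 * explicitly (see rocker_flow_tbl_ucast4_routing() below).
 */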
2430
2431 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2432                                struct switchdev_trans *trans, int flags,
2433                                struct rocker_flow_tbl_entry *match)
2434 {
2435         struct rocker *rocker = rocker_port->rocker;
2436         struct rocker_flow_tbl_entry *found;
2437         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2438         unsigned long lock_flags;
2439
2440         match->key_crc32 = crc32(~0, &match->key, key_len);
2441
2442         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2443
2444         found = rocker_flow_tbl_find(rocker, match);
2445
2446         if (found) {
2447                 match->cookie = found->cookie;
2448                 if (!switchdev_trans_ph_prepare(trans))
2449                         hash_del(&found->entry);
2450                 rocker_port_kfree(trans, found);
2451                 found = match;
2452                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2453         } else {
2454                 found = match;
2455                 found->cookie = rocker->flow_tbl_next_cookie++;
2456                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2457         }
2458
2459         if (!switchdev_trans_ph_prepare(trans))
2460                 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2461
2462         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2463
2464         return rocker_cmd_exec(rocker_port, trans, flags,
2465                                rocker_cmd_flow_tbl_add, found, NULL, NULL);
2466 }
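/* If a matching key already exists, the new entry inherits its cookie
 * and is sent as a FLOW_MOD; otherwise a fresh cookie is allocated
 * and a FLOW_ADD is issued.  In the switchdev prepare phase the hash
 * table is left untouched, so an aborted transaction has nothing to
 * roll back.
 */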
2467
2468 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2469                                struct switchdev_trans *trans, int flags,
2470                                struct rocker_flow_tbl_entry *match)
2471 {
2472         struct rocker *rocker = rocker_port->rocker;
2473         struct rocker_flow_tbl_entry *found;
2474         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2475         unsigned long lock_flags;
2476         int err = 0;
2477
2478         match->key_crc32 = crc32(~0, &match->key, key_len);
2479
2480         spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2481
2482         found = rocker_flow_tbl_find(rocker, match);
2483
2484         if (found) {
2485                 if (!switchdev_trans_ph_prepare(trans))
2486                         hash_del(&found->entry);
2487                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2488         }
2489
2490         spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2491
2492         rocker_port_kfree(trans, match);
2493
2494         if (found) {
2495                 err = rocker_cmd_exec(rocker_port, trans, flags,
2496                                       rocker_cmd_flow_tbl_del,
2497                                       found, NULL, NULL);
2498                 rocker_port_kfree(trans, found);
2499         }
2500
2501         return err;
2502 }
2503
2504 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2505                               struct switchdev_trans *trans, int flags,
2506                               struct rocker_flow_tbl_entry *entry)
2507 {
2508         if (flags & ROCKER_OP_FLAG_REMOVE)
2509                 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2510         else
2511                 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2512 }
2513
2514 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2515                                    struct switchdev_trans *trans, int flags,
2516                                    u32 in_pport, u32 in_pport_mask,
2517                                    enum rocker_of_dpa_table_id goto_tbl)
2518 {
2519         struct rocker_flow_tbl_entry *entry;
2520
2521         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2522         if (!entry)
2523                 return -ENOMEM;
2524
2525         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2526         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2527         entry->key.ig_port.in_pport = in_pport;
2528         entry->key.ig_port.in_pport_mask = in_pport_mask;
2529         entry->key.ig_port.goto_tbl = goto_tbl;
2530
2531         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2532 }
2533
2534 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2535                                 struct switchdev_trans *trans, int flags,
2536                                 u32 in_pport, __be16 vlan_id,
2537                                 __be16 vlan_id_mask,
2538                                 enum rocker_of_dpa_table_id goto_tbl,
2539                                 bool untagged, __be16 new_vlan_id)
2540 {
2541         struct rocker_flow_tbl_entry *entry;
2542
2543         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2544         if (!entry)
2545                 return -ENOMEM;
2546
2547         entry->key.priority = ROCKER_PRIORITY_VLAN;
2548         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2549         entry->key.vlan.in_pport = in_pport;
2550         entry->key.vlan.vlan_id = vlan_id;
2551         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2552         entry->key.vlan.goto_tbl = goto_tbl;
2553
2554         entry->key.vlan.untagged = untagged;
2555         entry->key.vlan.new_vlan_id = new_vlan_id;
2556
2557         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2558 }
2559
2560 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2561                                     struct switchdev_trans *trans,
2562                                     u32 in_pport, u32 in_pport_mask,
2563                                     __be16 eth_type, const u8 *eth_dst,
2564                                     const u8 *eth_dst_mask, __be16 vlan_id,
2565                                     __be16 vlan_id_mask, bool copy_to_cpu,
2566                                     int flags)
2567 {
2568         struct rocker_flow_tbl_entry *entry;
2569
2570         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2571         if (!entry)
2572                 return -ENOMEM;
2573
2574         if (is_multicast_ether_addr(eth_dst)) {
2575                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2576                 entry->key.term_mac.goto_tbl =
2577                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2578         } else {
2579                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2580                 entry->key.term_mac.goto_tbl =
2581                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2582         }
2583
2584         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2585         entry->key.term_mac.in_pport = in_pport;
2586         entry->key.term_mac.in_pport_mask = in_pport_mask;
2587         entry->key.term_mac.eth_type = eth_type;
2588         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2589         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2590         entry->key.term_mac.vlan_id = vlan_id;
2591         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2592         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2593
2594         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2595 }
2596
2597 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2598                                   struct switchdev_trans *trans, int flags,
2599                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2600                                   __be16 vlan_id, u32 tunnel_id,
2601                                   enum rocker_of_dpa_table_id goto_tbl,
2602                                   u32 group_id, bool copy_to_cpu)
2603 {
2604         struct rocker_flow_tbl_entry *entry;
2605         u32 priority;
2606         bool vlan_bridging = !!vlan_id;
2607         bool dflt = !eth_dst || eth_dst_mask;
2608         bool wild = false;
2609
2610         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2611         if (!entry)
2612                 return -ENOMEM;
2613
2614         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2615
2616         if (eth_dst) {
2617                 entry->key.bridge.has_eth_dst = 1;
2618                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2619         }
2620         if (eth_dst_mask) {
2621                 entry->key.bridge.has_eth_dst_mask = 1;
2622                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2623                 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2624                         wild = true;
2625         }
2626
2627         priority = ROCKER_PRIORITY_UNKNOWN;
2628         if (vlan_bridging && dflt && wild)
2629                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2630         else if (vlan_bridging && dflt && !wild)
2631                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2632         else if (vlan_bridging && !dflt)
2633                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2634         else if (!vlan_bridging && dflt && wild)
2635                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2636         else if (!vlan_bridging && dflt && !wild)
2637                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2638         else if (!vlan_bridging && !dflt)
2639                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2640
2641         entry->key.priority = priority;
2642         entry->key.bridge.vlan_id = vlan_id;
2643         entry->key.bridge.tunnel_id = tunnel_id;
2644         entry->key.bridge.goto_tbl = goto_tbl;
2645         entry->key.bridge.group_id = group_id;
2646         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2647
2648         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2649 }
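/* The bridging priority is chosen along three axes: VLAN vs. tenant
 * (tunnel) bridging, default vs. exact-MAC entries, and wildcarded
 * vs. exact masks.  A fully specified VLAN FDB entry thus lands at
 * ROCKER_PRIORITY_BRIDGING_VLAN, while a flood entry with a wildcard
 * mask falls back to one of the _DFLT_WILD priorities.
 */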
2650
2651 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2652                                           struct switchdev_trans *trans,
2653                                           __be16 eth_type, __be32 dst,
2654                                           __be32 dst_mask, u32 priority,
2655                                           enum rocker_of_dpa_table_id goto_tbl,
2656                                           u32 group_id, int flags)
2657 {
2658         struct rocker_flow_tbl_entry *entry;
2659
2660         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2661         if (!entry)
2662                 return -ENOMEM;
2663
2664         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2665         entry->key.priority = priority;
2666         entry->key.ucast_routing.eth_type = eth_type;
2667         entry->key.ucast_routing.dst4 = dst;
2668         entry->key.ucast_routing.dst4_mask = dst_mask;
2669         entry->key.ucast_routing.goto_tbl = goto_tbl;
2670         entry->key.ucast_routing.group_id = group_id;
2671         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2672                                   ucast_routing.group_id);
2673
2674         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2675 }
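/* key_len deliberately stops at offsetof(..., ucast_routing.group_id):
 * the nexthop group is excluded from the lookup key, so replacing a
 * route's nexthop matches the existing entry in rocker_flow_tbl_add()
 * and goes out as a FLOW_MOD rather than a duplicate FLOW_ADD.
 */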
2676
2677 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2678                                struct switchdev_trans *trans, int flags,
2679                                u32 in_pport, u32 in_pport_mask,
2680                                const u8 *eth_src, const u8 *eth_src_mask,
2681                                const u8 *eth_dst, const u8 *eth_dst_mask,
2682                                __be16 eth_type, __be16 vlan_id,
2683                                __be16 vlan_id_mask, u8 ip_proto,
2684                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2685                                u32 group_id)
2686 {
2687         u32 priority;
2688         struct rocker_flow_tbl_entry *entry;
2689
2690         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2691         if (!entry)
2692                 return -ENOMEM;
2693
2694         priority = ROCKER_PRIORITY_ACL_NORMAL;
2695         if (eth_dst && eth_dst_mask) {
2696                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2697                         priority = ROCKER_PRIORITY_ACL_DFLT;
2698                 else if (is_link_local_ether_addr(eth_dst))
2699                         priority = ROCKER_PRIORITY_ACL_CTRL;
2700         }
2701
2702         entry->key.priority = priority;
2703         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2704         entry->key.acl.in_pport = in_pport;
2705         entry->key.acl.in_pport_mask = in_pport_mask;
2706
2707         if (eth_src)
2708                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2709         if (eth_src_mask)
2710                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2711         if (eth_dst)
2712                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2713         if (eth_dst_mask)
2714                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2715
2716         entry->key.acl.eth_type = eth_type;
2717         entry->key.acl.vlan_id = vlan_id;
2718         entry->key.acl.vlan_id_mask = vlan_id_mask;
2719         entry->key.acl.ip_proto = ip_proto;
2720         entry->key.acl.ip_proto_mask = ip_proto_mask;
2721         entry->key.acl.ip_tos = ip_tos;
2722         entry->key.acl.ip_tos_mask = ip_tos_mask;
2723         entry->key.acl.group_id = group_id;
2724
2725         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2726 }
2727
2728 static struct rocker_group_tbl_entry *
2729 rocker_group_tbl_find(const struct rocker *rocker,
2730                       const struct rocker_group_tbl_entry *match)
2731 {
2732         struct rocker_group_tbl_entry *found;
2733
2734         hash_for_each_possible(rocker->group_tbl, found,
2735                                entry, match->group_id) {
2736                 if (found->group_id == match->group_id)
2737                         return found;
2738         }
2739
2740         return NULL;
2741 }
2742
2743 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2744                                         struct rocker_group_tbl_entry *entry)
2745 {
2746         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2747         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2748         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2749                 rocker_port_kfree(trans, entry->group_ids);
2750                 break;
2751         default:
2752                 break;
2753         }
2754         rocker_port_kfree(trans, entry);
2755 }
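/* L2 flood and multicast entries carry a separately allocated
 * group_ids array (filled in by rocker_group_l2_fan_out() below), so
 * it has to be freed alongside the entry itself.
 */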
2756
2757 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2758                                 struct switchdev_trans *trans, int flags,
2759                                 struct rocker_group_tbl_entry *match)
2760 {
2761         struct rocker *rocker = rocker_port->rocker;
2762         struct rocker_group_tbl_entry *found;
2763         unsigned long lock_flags;
2764
2765         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2766
2767         found = rocker_group_tbl_find(rocker, match);
2768
2769         if (found) {
2770                 if (!switchdev_trans_ph_prepare(trans))
2771                         hash_del(&found->entry);
2772                 rocker_group_tbl_entry_free(trans, found);
2773                 found = match;
2774                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2775         } else {
2776                 found = match;
2777                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2778         }
2779
2780         if (!switchdev_trans_ph_prepare(trans))
2781                 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2782
2783         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2784
2785         return rocker_cmd_exec(rocker_port, trans, flags,
2786                                rocker_cmd_group_tbl_add, found, NULL, NULL);
2787 }
2788
2789 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2790                                 struct switchdev_trans *trans, int flags,
2791                                 struct rocker_group_tbl_entry *match)
2792 {
2793         struct rocker *rocker = rocker_port->rocker;
2794         struct rocker_group_tbl_entry *found;
2795         unsigned long lock_flags;
2796         int err = 0;
2797
2798         spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2799
2800         found = rocker_group_tbl_find(rocker, match);
2801
2802         if (found) {
2803                 if (!switchdev_trans_ph_prepare(trans))
2804                         hash_del(&found->entry);
2805                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2806         }
2807
2808         spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2809
2810         rocker_group_tbl_entry_free(trans, match);
2811
2812         if (found) {
2813                 err = rocker_cmd_exec(rocker_port, trans, flags,
2814                                       rocker_cmd_group_tbl_del,
2815                                       found, NULL, NULL);
2816                 rocker_group_tbl_entry_free(trans, found);
2817         }
2818
2819         return err;
2820 }
2821
2822 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2823                                struct switchdev_trans *trans, int flags,
2824                                struct rocker_group_tbl_entry *entry)
2825 {
2826         if (flags & ROCKER_OP_FLAG_REMOVE)
2827                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2828         else
2829                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2830 }
2831
2832 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2833                                      struct switchdev_trans *trans, int flags,
2834                                      __be16 vlan_id, u32 out_pport,
2835                                      int pop_vlan)
2836 {
2837         struct rocker_group_tbl_entry *entry;
2838
2839         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2840         if (!entry)
2841                 return -ENOMEM;
2842
2843         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2844         entry->l2_interface.pop_vlan = pop_vlan;
2845
2846         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2847 }
2848
2849 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2850                                    struct switchdev_trans *trans,
2851                                    int flags, u8 group_count,
2852                                    const u32 *group_ids, u32 group_id)
2853 {
2854         struct rocker_group_tbl_entry *entry;
2855
2856         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2857         if (!entry)
2858                 return -ENOMEM;
2859
2860         entry->group_id = group_id;
2861         entry->group_count = group_count;
2862
2863         entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
2864                                                group_count, sizeof(u32));
2865         if (!entry->group_ids) {
2866                 rocker_port_kfree(trans, entry);
2867                 return -ENOMEM;
2868         }
2869         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2870
2871         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2872 }
2873
2874 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2875                                  struct switchdev_trans *trans, int flags,
2876                                  __be16 vlan_id, u8 group_count,
2877                                  const u32 *group_ids, u32 group_id)
2878 {
2879         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2880                                        group_count, group_ids,
2881                                        group_id);
2882 }
2883
2884 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2885                                    struct switchdev_trans *trans, int flags,
2886                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2887                                    __be16 vlan_id, bool ttl_check, u32 pport)
2888 {
2889         struct rocker_group_tbl_entry *entry;
2890
2891         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2892         if (!entry)
2893                 return -ENOMEM;
2894
2895         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2896         if (src_mac)
2897                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2898         if (dst_mac)
2899                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2900         entry->l3_unicast.vlan_id = vlan_id;
2901         entry->l3_unicast.ttl_check = ttl_check;
2902         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2903
2904         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2905 }
2906
2907 static struct rocker_neigh_tbl_entry *
2908 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2909 {
2910         struct rocker_neigh_tbl_entry *found;
2911
2912         hash_for_each_possible(rocker->neigh_tbl, found,
2913                                entry, be32_to_cpu(ip_addr))
2914                 if (found->ip_addr == ip_addr)
2915                         return found;
2916
2917         return NULL;
2918 }
2919
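     /* The neigh helpers below follow the switchdev two-phase
      * transaction model: the table index is assigned only once (on
      * prepare, or immediately when there is no transaction), and the
      * hash table and reference counts are left untouched during the
      * prepare phase.
      */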
2920 static void _rocker_neigh_add(struct rocker *rocker,
2921                               struct switchdev_trans *trans,
2922                               struct rocker_neigh_tbl_entry *entry)
2923 {
2924         if (!switchdev_trans_ph_commit(trans))
2925                 entry->index = rocker->neigh_tbl_next_index++;
2926         if (switchdev_trans_ph_prepare(trans))
2927                 return;
2928         entry->ref_count++;
2929         hash_add(rocker->neigh_tbl, &entry->entry,
2930                  be32_to_cpu(entry->ip_addr));
2931 }
2932
2933 static void _rocker_neigh_del(struct switchdev_trans *trans,
2934                               struct rocker_neigh_tbl_entry *entry)
2935 {
2936         if (switchdev_trans_ph_prepare(trans))
2937                 return;
2938         if (--entry->ref_count == 0) {
2939                 hash_del(&entry->entry);
2940                 rocker_port_kfree(trans, entry);
2941         }
2942 }
2943
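     /* Given a new eth_dst, refresh the entry's MAC and TTL-check flag
      * in place; called with eth_dst == NULL, this instead takes
      * another reference on the entry (the nexthop case below).
      */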
2944 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2945                                  struct switchdev_trans *trans,
2946                                  const u8 *eth_dst, bool ttl_check)
2947 {
2948         if (eth_dst) {
2949                 ether_addr_copy(entry->eth_dst, eth_dst);
2950                 entry->ttl_check = ttl_check;
2951         } else if (!switchdev_trans_ph_prepare(trans)) {
2952                 entry->ref_count++;
2953         }
2954 }
2955
2956 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2957                                   struct switchdev_trans *trans,
2958                                   int flags, __be32 ip_addr, const u8 *eth_dst)
2959 {
2960         struct rocker *rocker = rocker_port->rocker;
2961         struct rocker_neigh_tbl_entry *entry;
2962         struct rocker_neigh_tbl_entry *found;
2963         unsigned long lock_flags;
2964         __be16 eth_type = htons(ETH_P_IP);
2965         enum rocker_of_dpa_table_id goto_tbl =
2966                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2967         u32 group_id;
2968         u32 priority = 0;
2969         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2970         bool updating;
2971         bool removing;
2972         int err = 0;
2973
2974         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2975         if (!entry)
2976                 return -ENOMEM;
2977
2978         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2979
2980         found = rocker_neigh_tbl_find(rocker, ip_addr);
2981
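             /* Classify the request against current table state: an add
              * for an existing entry becomes an update, and a remove for
              * a missing entry falls through to -ENOENT below.
              */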
2982         updating = found && adding;
2983         removing = found && !adding;
2984         adding = !found && adding;
2985
2986         if (adding) {
2987                 entry->ip_addr = ip_addr;
2988                 entry->dev = rocker_port->dev;
2989                 ether_addr_copy(entry->eth_dst, eth_dst);
2990                 entry->ttl_check = true;
2991                 _rocker_neigh_add(rocker, trans, entry);
2992         } else if (removing) {
2993                 memcpy(entry, found, sizeof(*entry));
2994                 _rocker_neigh_del(trans, found);
2995         } else if (updating) {
2996                 _rocker_neigh_update(found, trans, eth_dst, true);
2997                 memcpy(entry, found, sizeof(*entry));
2998         } else {
2999                 err = -ENOENT;
3000         }
3001
3002         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3003
3004         if (err)
3005                 goto err_out;
3006
3007         /* For each active neighbor, we have an L3 unicast group and
3008          * a /32 route to the neighbor, which uses the L3 unicast
3009          * group.  The L3 unicast group can also be referred to by
3010          * other routes' nexthops.
3011          */
3012
3013         err = rocker_group_l3_unicast(rocker_port, trans, flags,
3014                                       entry->index,
3015                                       rocker_port->dev->dev_addr,
3016                                       entry->eth_dst,
3017                                       rocker_port->internal_vlan_id,
3018                                       entry->ttl_check,
3019                                       rocker_port->pport);
3020         if (err) {
3021                 netdev_err(rocker_port->dev,
3022                            "Error (%d) L3 unicast group index %d\n",
3023                            err, entry->index);
3024                 goto err_out;
3025         }
3026
3027         if (adding || removing) {
3028                 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3029                 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3030                                                      eth_type, ip_addr,
3031                                                      inet_make_mask(32),
3032                                                      priority, goto_tbl,
3033                                                      group_id, flags);
3034
3035                 if (err)
3036                         netdev_err(rocker_port->dev,
3037                                    "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3038                                    err, &entry->ip_addr, group_id);
3039         }
3040
3041 err_out:
3042         if (!adding)
3043                 rocker_port_kfree(trans, entry);
3044
3045         return err;
3046 }
3047
3048 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3049                                     struct switchdev_trans *trans,
3050                                     __be32 ip_addr)
3051 {
3052         struct net_device *dev = rocker_port->dev;
3053         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3054         int err = 0;
3055
3056         if (!n) {
3057                 n = neigh_create(&arp_tbl, &ip_addr, dev);
3058                 if (IS_ERR(n))
3059                         return PTR_ERR(n);
3060         }
3061
3062         /* If the neigh is already resolved, then go ahead and
3063          * install the entry, otherwise start the ARP process to
3064          * resolve the neigh.
3065          */
3066
3067         if (n->nud_state & NUD_VALID)
3068                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3069                                              ip_addr, n->ha);
3070         else
3071                 neigh_event_send(n, NULL);
3072
3073         neigh_release(n);
3074         return err;
3075 }
3076
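     /* Resolve a nexthop IP to a neigh table entry and return its L3
      * unicast group index via *index.  A freshly added entry starts
      * out unresolved, so ARP resolution is kicked off for it below.
      */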
3077 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3078                                struct switchdev_trans *trans, int flags,
3079                                __be32 ip_addr, u32 *index)
3080 {
3081         struct rocker *rocker = rocker_port->rocker;
3082         struct rocker_neigh_tbl_entry *entry;
3083         struct rocker_neigh_tbl_entry *found;
3084         unsigned long lock_flags;
3085         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3086         bool updating;
3087         bool removing;
3088         bool resolved = true;
3089         int err = 0;
3090
3091         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
3092         if (!entry)
3093                 return -ENOMEM;
3094
3095         spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3096
3097         found = rocker_neigh_tbl_find(rocker, ip_addr);
3098         if (found)
3099                 *index = found->index;
3100
3101         updating = found && adding;
3102         removing = found && !adding;
3103         adding = !found && adding;
3104
3105         if (adding) {
3106                 entry->ip_addr = ip_addr;
3107                 entry->dev = rocker_port->dev;
3108                 _rocker_neigh_add(rocker, trans, entry);
3109                 *index = entry->index;
3110                 resolved = false;
3111         } else if (removing) {
3112                 _rocker_neigh_del(trans, found);
3113         } else if (updating) {
3114                 _rocker_neigh_update(found, trans, NULL, false);
3115                 resolved = !is_zero_ether_addr(found->eth_dst);
3116         } else {
3117                 err = -ENOENT;
3118         }
3119
3120         spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3121
3122         if (!adding)
3123                 rocker_port_kfree(trans, entry);
3124
3125         if (err)
3126                 return err;
3127
3128         /* Resolved means neigh ip_addr is resolved to neigh mac. */
3129
3130         if (!resolved)
3131                 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
3132
3133         return err;
3134 }
3135
3136 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3137                                         struct switchdev_trans *trans,
3138                                         int flags, __be16 vlan_id)
3139 {
3140         struct rocker_port *p;
3141         const struct rocker *rocker = rocker_port->rocker;
3142         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3143         u32 *group_ids;
3144         u8 group_count = 0;
3145         int err = 0;
3146         int i;
3147
3148         group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
3149                                         rocker->port_count, sizeof(u32));
3150         if (!group_ids)
3151                 return -ENOMEM;
3152
3153         /* Adjust the flood group for this VLAN.  The flood group
3154          * references an L2 interface group for each port in this
3155          * VLAN.
3156          */
3157
3158         for (i = 0; i < rocker->port_count; i++) {
3159                 p = rocker->ports[i];
3160                 if (!p)
3161                         continue;
3162                 if (!rocker_port_is_bridged(p))
3163                         continue;
3164                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3165                         group_ids[group_count++] =
3166                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3167                 }
3168         }
3169
3170         /* If there are no bridged ports in this VLAN, we're done */
3171         if (group_count == 0)
3172                 goto no_ports_in_vlan;
3173
3174         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3175                                     group_count, group_ids, group_id);
3176         if (err)
3177                 netdev_err(rocker_port->dev,
3178                            "Error (%d) port VLAN l2 flood group\n", err);
3179
3180 no_ports_in_vlan:
3181         rocker_port_kfree(trans, group_ids);
3182         return err;
3183 }
3184
3185 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3186                                       struct switchdev_trans *trans, int flags,
3187                                       __be16 vlan_id, bool pop_vlan)
3188 {
3189         const struct rocker *rocker = rocker_port->rocker;
3190         struct rocker_port *p;
3191         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3192         u32 out_pport;
3193         int ref = 0;
3194         int err;
3195         int i;
3196
3197         /* An L2 interface group for this port in this VLAN, but
3198          * only when port STP state is LEARNING|FORWARDING.
3199          */
3200
3201         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3202             rocker_port->stp_state == BR_STATE_FORWARDING) {
3203                 out_pport = rocker_port->pport;
3204                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3205                                                 vlan_id, out_pport, pop_vlan);
3206                 if (err) {
3207                         netdev_err(rocker_port->dev,
3208                                    "Error (%d) port VLAN l2 group for pport %d\n",
3209                                    err, out_pport);
3210                         return err;
3211                 }
3212         }
3213
3214         /* An L2 interface group for this VLAN to the CPU port.
3215          * Add it when the first port joins this VLAN and destroy
3216          * it when the last port leaves.
3217          */
3218
3219         for (i = 0; i < rocker->port_count; i++) {
3220                 p = rocker->ports[i];
3221                 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
3222                         ref++;
3223         }
3224
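             /* Act only on the first join (ref == 1 while adding) or the
              * last leave (ref == 0 while removing); otherwise the CPU
              * port group is already in the desired state.
              */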
3225         if ((!adding || ref != 1) && (adding || ref != 0))
3226                 return 0;
3227
3228         out_pport = 0;
3229         err = rocker_group_l2_interface(rocker_port, trans, flags,
3230                                         vlan_id, out_pport, pop_vlan);
3231         if (err) {
3232                 netdev_err(rocker_port->dev,
3233                            "Error (%d) port VLAN l2 group for CPU port\n", err);
3234                 return err;
3235         }
3236
3237         return 0;
3238 }
3239
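     /* Control-traffic classes and how each is trapped to the CPU:
      * via an ACL flow, a bridging (flood) flow, or a termination-MAC
      * flow, as selected by the acl/bridge/term flags in
      * rocker_port_ctrl_vlan() below.
      */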
3240 static struct rocker_ctrl {
3241         const u8 *eth_dst;
3242         const u8 *eth_dst_mask;
3243         __be16 eth_type;
3244         bool acl;
3245         bool bridge;
3246         bool term;
3247         bool copy_to_cpu;
3248 } rocker_ctrls[] = {
3249         [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3250                 /* pass link local multicast pkts up to CPU for filtering */
3251                 .eth_dst = ll_mac,
3252                 .eth_dst_mask = ll_mask,
3253                 .acl = true,
3254         },
3255         [ROCKER_CTRL_LOCAL_ARP] = {
3256                 /* pass local ARP pkts up to CPU */
3257                 .eth_dst = zero_mac,
3258                 .eth_dst_mask = zero_mac,
3259                 .eth_type = htons(ETH_P_ARP),
3260                 .acl = true,
3261         },
3262         [ROCKER_CTRL_IPV4_MCAST] = {
3263                 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3264                 .eth_dst = ipv4_mcast,
3265                 .eth_dst_mask = ipv4_mask,
3266                 .eth_type = htons(ETH_P_IP),
3267                 .term  = true,
3268                 .copy_to_cpu = true,
3269         },
3270         [ROCKER_CTRL_IPV6_MCAST] = {
3271                 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3272                 .eth_dst = ipv6_mcast,
3273                 .eth_dst_mask = ipv6_mask,
3274                 .eth_type = htons(ETH_P_IPV6),
3275                 .term  = true,
3276                 .copy_to_cpu = true,
3277         },
3278         [ROCKER_CTRL_DFLT_BRIDGING] = {
3279                 /* flood any pkts on vlan */
3280                 .bridge = true,
3281                 .copy_to_cpu = true,
3282         },
3283         [ROCKER_CTRL_DFLT_OVS] = {
3284                 /* pass all pkts up to CPU */
3285                 .eth_dst = zero_mac,
3286                 .eth_dst_mask = zero_mac,
3287                 .acl = true,
3288         },
3289 };
3290
3291 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3292                                      struct switchdev_trans *trans, int flags,
3293                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3294 {
3295         u32 in_pport = rocker_port->pport;
3296         u32 in_pport_mask = 0xffffffff;
3297         u32 out_pport = 0;
3298         const u8 *eth_src = NULL;
3299         const u8 *eth_src_mask = NULL;
3300         __be16 vlan_id_mask = htons(0xffff);
3301         u8 ip_proto = 0;
3302         u8 ip_proto_mask = 0;
3303         u8 ip_tos = 0;
3304         u8 ip_tos_mask = 0;
3305         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3306         int err;
3307
3308         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3309                                   in_pport, in_pport_mask,
3310                                   eth_src, eth_src_mask,
3311                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3312                                   ctrl->eth_type,
3313                                   vlan_id, vlan_id_mask,
3314                                   ip_proto, ip_proto_mask,
3315                                   ip_tos, ip_tos_mask,
3316                                   group_id);
3317
3318         if (err)
3319                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3320
3321         return err;
3322 }
3323
3324 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3325                                         struct switchdev_trans *trans,
3326                                         int flags,
3327                                         const struct rocker_ctrl *ctrl,
3328                                         __be16 vlan_id)
3329 {
3330         enum rocker_of_dpa_table_id goto_tbl =
3331                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3332         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3333         u32 tunnel_id = 0;
3334         int err;
3335
3336         if (!rocker_port_is_bridged(rocker_port))
3337                 return 0;
3338
3339         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3340                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3341                                      vlan_id, tunnel_id,
3342                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3343
3344         if (err)
3345                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3346
3347         return err;
3348 }
3349
3350 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3351                                       struct switchdev_trans *trans, int flags,
3352                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3353 {
3354         u32 in_pport_mask = 0xffffffff;
3355         __be16 vlan_id_mask = htons(0xffff);
3356         int err;
3357
3358         if (ntohs(vlan_id) == 0)
3359                 vlan_id = rocker_port->internal_vlan_id;
3360
3361         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3362                                        rocker_port->pport, in_pport_mask,
3363                                        ctrl->eth_type, ctrl->eth_dst,
3364                                        ctrl->eth_dst_mask, vlan_id,
3365                                        vlan_id_mask, ctrl->copy_to_cpu,
3366                                        flags);
3367
3368         if (err)
3369                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3370
3371         return err;
3372 }
3373
3374 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3375                                  struct switchdev_trans *trans, int flags,
3376                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3377 {
3378         if (ctrl->acl)
3379                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3380                                                  ctrl, vlan_id);
3381         if (ctrl->bridge)
3382                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3383                                                     ctrl, vlan_id);
3384
3385         if (ctrl->term)
3386                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3387                                                   ctrl, vlan_id);
3388
3389         return -EOPNOTSUPP;
3390 }
3391
3392 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3393                                      struct switchdev_trans *trans, int flags,
3394                                      __be16 vlan_id)
3395 {
3396         int err = 0;
3397         int i;
3398
3399         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3400                 if (rocker_port->ctrls[i]) {
3401                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3402                                                     &rocker_ctrls[i], vlan_id);
3403                         if (err)
3404                                 return err;
3405                 }
3406         }
3407
3408         return err;
3409 }
3410
3411 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3412                             struct switchdev_trans *trans, int flags,
3413                             const struct rocker_ctrl *ctrl)
3414 {
3415         u16 vid;
3416         int err = 0;
3417
3418         for (vid = 1; vid < VLAN_N_VID; vid++) {
3419                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3420                         continue;
3421                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3422                                             ctrl, htons(vid));
3423                 if (err)
3424                         break;
3425         }
3426
3427         return err;
3428 }
3429
3430 static int rocker_port_vlan(struct rocker_port *rocker_port,
3431                             struct switchdev_trans *trans, int flags, u16 vid)
3432 {
3433         enum rocker_of_dpa_table_id goto_tbl =
3434                 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3435         u32 in_pport = rocker_port->pport;
3436         __be16 vlan_id = htons(vid);
3437         __be16 vlan_id_mask = htons(0xffff);
3438         __be16 internal_vlan_id;
3439         bool untagged;
3440         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3441         int err;
3442
3443         internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3444
3445         if (adding && test_bit(ntohs(internal_vlan_id),
3446                                rocker_port->vlan_bitmap))
3447                 return 0; /* already added */
3448         else if (!adding && !test_bit(ntohs(internal_vlan_id),
3449                                       rocker_port->vlan_bitmap))
3450                 return 0; /* already removed */
3451
3452         change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3453
3454         if (adding) {
3455                 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3456                                                 internal_vlan_id);
3457                 if (err) {
3458                         netdev_err(rocker_port->dev,
3459                                    "Error (%d) port ctrl vlan add\n", err);
3460                         goto err_out;
3461                 }
3462         }
3463
3464         err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3465                                          internal_vlan_id, untagged);
3466         if (err) {
3467                 netdev_err(rocker_port->dev,
3468                            "Error (%d) port VLAN l2 groups\n", err);
3469                 goto err_out;
3470         }
3471
3472         err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3473                                            internal_vlan_id);
3474         if (err) {
3475                 netdev_err(rocker_port->dev,
3476                            "Error (%d) port VLAN l2 flood group\n", err);
3477                 goto err_out;
3478         }
3479
3480         err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3481                                    in_pport, vlan_id, vlan_id_mask,
3482                                    goto_tbl, untagged, internal_vlan_id);
3483         if (err)
3484                 netdev_err(rocker_port->dev,
3485                            "Error (%d) port VLAN table\n", err);
3486
3487 err_out:
3488         if (switchdev_trans_ph_prepare(trans))
3489                 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3490
3491         return err;
3492 }
3493
3494 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3495                               struct switchdev_trans *trans, int flags)
3496 {
3497         enum rocker_of_dpa_table_id goto_tbl;
3498         u32 in_pport;
3499         u32 in_pport_mask;
3500         int err;
3501
3502         /* Normal Ethernet Frames.  Matches pkts from any local physical
3503          * ports.  Goto VLAN tbl.
3504          */
3505
3506         in_pport = 0;
3507         in_pport_mask = 0xffff0000;
3508         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3509
3510         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3511                                       in_pport, in_pport_mask,
3512                                       goto_tbl);
3513         if (err)
3514                 netdev_err(rocker_port->dev,
3515                            "Error (%d) ingress port table entry\n", err);
3516
3517         return err;
3518 }
3519
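     /* FDB learn/age events are pushed to the bridge via deferred
      * work, letting the switchdev notifier chain run in process
      * context instead of under the callers' locks.
      */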
3520 struct rocker_fdb_learn_work {
3521         struct work_struct work;
3522         struct rocker_port *rocker_port;
3523         struct switchdev_trans *trans;
3524         int flags;
3525         u8 addr[ETH_ALEN];
3526         u16 vid;
3527 };
3528
3529 static void rocker_port_fdb_learn_work(struct work_struct *work)
3530 {
3531         const struct rocker_fdb_learn_work *lw =
3532                 container_of(work, struct rocker_fdb_learn_work, work);
3533         bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3534         bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3535         struct switchdev_notifier_fdb_info info;
3536
3537         info.addr = lw->addr;
3538         info.vid = lw->vid;
3539
3540         if (learned && removing)
3541                 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3542                                          lw->rocker_port->dev, &info.info);
3543         else if (learned && !removing)
3544                 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3545                                          lw->rocker_port->dev, &info.info);
3546
3547         rocker_port_kfree(lw->trans, work);
3548 }
3549
3550 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3551                                  struct switchdev_trans *trans, int flags,
3552                                  const u8 *addr, __be16 vlan_id)
3553 {
3554         struct rocker_fdb_learn_work *lw;
3555         enum rocker_of_dpa_table_id goto_tbl =
3556                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3557         u32 out_pport = rocker_port->pport;
3558         u32 tunnel_id = 0;
3559         u32 group_id = ROCKER_GROUP_NONE;
3560         bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3561         bool copy_to_cpu = false;
3562         int err;
3563
3564         if (rocker_port_is_bridged(rocker_port))
3565                 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3566
3567         if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3568                 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3569                                              NULL, vlan_id, tunnel_id, goto_tbl,
3570                                              group_id, copy_to_cpu);
3571                 if (err)
3572                         return err;
3573         }
3574
3575         if (!syncing)
3576                 return 0;
3577
3578         if (!rocker_port_is_bridged(rocker_port))
3579                 return 0;
3580
3581         lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
3582         if (!lw)
3583                 return -ENOMEM;
3584
3585         INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3586
3587         lw->rocker_port = rocker_port;
3588         lw->trans = trans;
3589         lw->flags = flags;
3590         ether_addr_copy(lw->addr, addr);
3591         lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3592
3593         if (switchdev_trans_ph_prepare(trans))
3594                 rocker_port_kfree(trans, lw);
3595         else
3596                 schedule_work(&lw->work);
3597
3598         return 0;
3599 }
3600
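     /* FDB entries are hashed by the CRC32 of their key; the full key
      * is compared on lookup to disambiguate hash collisions.
      */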
3601 static struct rocker_fdb_tbl_entry *
3602 rocker_fdb_tbl_find(const struct rocker *rocker,
3603                     const struct rocker_fdb_tbl_entry *match)
3604 {
3605         struct rocker_fdb_tbl_entry *found;
3606
3607         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3608                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3609                         return found;
3610
3611         return NULL;
3612 }
3613
3614 static int rocker_port_fdb(struct rocker_port *rocker_port,
3615                            struct switchdev_trans *trans,
3616                            const unsigned char *addr,
3617                            __be16 vlan_id, int flags)
3618 {
3619         struct rocker *rocker = rocker_port->rocker;
3620         struct rocker_fdb_tbl_entry *fdb;
3621         struct rocker_fdb_tbl_entry *found;
3622         bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3623         unsigned long lock_flags;
3624
3625         fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
3626         if (!fdb)
3627                 return -ENOMEM;
3628
3629         fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3630         fdb->touched = jiffies;
3631         fdb->key.rocker_port = rocker_port;
3632         ether_addr_copy(fdb->key.addr, addr);
3633         fdb->key.vlan_id = vlan_id;
3634         fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3635
3636         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3637
3638         found = rocker_fdb_tbl_find(rocker, fdb);
3639
3640         if (found) {
3641                 found->touched = jiffies;
3642                 if (removing) {
3643                         rocker_port_kfree(trans, fdb);
3644                         if (!switchdev_trans_ph_prepare(trans))
3645                                 hash_del(&found->entry);
3646                 }
3647         } else if (!removing) {
3648                 if (!switchdev_trans_ph_prepare(trans))
3649                         hash_add(rocker->fdb_tbl, &fdb->entry,
3650                                  fdb->key_crc32);
3651         }
3652
3653         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3654
3655         /* Check if adding and already exists, or removing and can't find */
3656         if (!found != !removing) {
3657                 rocker_port_kfree(trans, fdb);
3658                 if (!found && removing)
3659                         return 0;
3660                 /* Refreshing existing to update aging timers */
3661                 flags |= ROCKER_OP_FLAG_REFRESH;
3662         }
3663
3664         return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3665 }
3666
3667 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3668                                  struct switchdev_trans *trans, int flags)
3669 {
3670         struct rocker *rocker = rocker_port->rocker;
3671         struct rocker_fdb_tbl_entry *found;
3672         unsigned long lock_flags;
3673         struct hlist_node *tmp;
3674         int bkt;
3675         int err = 0;
3676
3677         if (rocker_port->stp_state == BR_STATE_LEARNING ||
3678             rocker_port->stp_state == BR_STATE_FORWARDING)
3679                 return 0;
3680
3681         flags |= ROCKER_OP_FLAG_REMOVE;
3682
3683         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3684
3685         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3686                 if (found->key.rocker_port != rocker_port)
3687                         continue;
3688                 if (!found->learned)
3689                         continue;
3690                 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3691                                             found->key.addr,
3692                                             found->key.vlan_id);
3693                 if (err)
3694                         goto err_out;
3695                 if (!switchdev_trans_ph_prepare(trans))
3696                         hash_del(&found->entry);
3697         }
3698
3699 err_out:
3700         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3701
3702         return err;
3703 }
3704
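     /* Timer callback: walk the learned FDB entries, expire any not
      * touched within their port's ageing time, and re-arm the timer
      * for the next earliest expiry.
      */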
3705 static void rocker_fdb_cleanup(unsigned long data)
3706 {
3707         struct rocker *rocker = (struct rocker *)data;
3708         struct rocker_port *rocker_port;
3709         struct rocker_fdb_tbl_entry *entry;
3710         struct hlist_node *tmp;
3711         unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3712         unsigned long expires;
3713         unsigned long lock_flags;
3714         int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3715                     ROCKER_OP_FLAG_LEARNED;
3716         int bkt;
3717
3718         spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3719
3720         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3721                 if (!entry->learned)
3722                         continue;
3723                 rocker_port = entry->key.rocker_port;
3724                 expires = entry->touched + rocker_port->ageing_time;
3725                 if (time_before_eq(expires, jiffies)) {
3726                         rocker_port_fdb_learn(rocker_port, NULL,
3727                                               flags, entry->key.addr,
3728                                               entry->key.vlan_id);
3729                         hash_del(&entry->entry);
3730                 } else if (time_before(expires, next_timer)) {
3731                         next_timer = expires;
3732                 }
3733         }
3734
3735         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3736
3737         mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3738 }
3739
3740 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3741                                   struct switchdev_trans *trans, int flags,
3742                                   __be16 vlan_id)
3743 {
3744         u32 in_pport_mask = 0xffffffff;
3745         __be16 eth_type;
3746         const u8 *dst_mac_mask = ff_mac;
3747         __be16 vlan_id_mask = htons(0xffff);
3748         bool copy_to_cpu = false;
3749         int err;
3750
3751         if (ntohs(vlan_id) == 0)
3752                 vlan_id = rocker_port->internal_vlan_id;
3753
3754         eth_type = htons(ETH_P_IP);
3755         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3756                                        rocker_port->pport, in_pport_mask,
3757                                        eth_type, rocker_port->dev->dev_addr,
3758                                        dst_mac_mask, vlan_id, vlan_id_mask,
3759                                        copy_to_cpu, flags);
3760         if (err)
3761                 return err;
3762
3763         eth_type = htons(ETH_P_IPV6);
3764         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3765                                        rocker_port->pport, in_pport_mask,
3766                                        eth_type, rocker_port->dev->dev_addr,
3767                                        dst_mac_mask, vlan_id, vlan_id_mask,
3768                                        copy_to_cpu, flags);
3769
3770         return err;
3771 }
3772
3773 static int rocker_port_fwding(struct rocker_port *rocker_port,
3774                               struct switchdev_trans *trans, int flags)
3775 {
3776         bool pop_vlan;
3777         u32 out_pport;
3778         __be16 vlan_id;
3779         u16 vid;
3780         int err;
3781
3782         /* Port will be forwarding-enabled if its STP state is LEARNING
3783          * or FORWARDING.  Traffic from CPU can still egress, regardless of
3784          * port STP state.  Use L2 interface group on port VLANs as a way
3785          * to toggle port forwarding: if forwarding is disabled, L2
3786          * interface group will not exist.
3787          */
3788
3789         if (rocker_port->stp_state != BR_STATE_LEARNING &&
3790             rocker_port->stp_state != BR_STATE_FORWARDING)
3791                 flags |= ROCKER_OP_FLAG_REMOVE;
3792
3793         out_pport = rocker_port->pport;
3794         for (vid = 1; vid < VLAN_N_VID; vid++) {
3795                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3796                         continue;
3797                 vlan_id = htons(vid);
3798                 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3799                 err = rocker_group_l2_interface(rocker_port, trans, flags,
3800                                                 vlan_id, out_pport, pop_vlan);
3801                 if (err) {
3802                         netdev_err(rocker_port->dev,
3803                                    "Error (%d) port VLAN l2 group for pport %d\n",
3804                                    err, out_pport);
3805                         return err;
3806                 }
3807         }
3808
3809         return 0;
3810 }
3811
3812 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3813                                   struct switchdev_trans *trans, int flags,
3814                                   u8 state)
3815 {
3816         bool want[ROCKER_CTRL_MAX] = { 0, };
3817         bool prev_ctrls[ROCKER_CTRL_MAX];
3818         u8 uninitialized_var(prev_state);
3819         int err;
3820         int i;
3821
3822         if (switchdev_trans_ph_prepare(trans)) {
3823                 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3824                 prev_state = rocker_port->stp_state;
3825         }
3826
3827         if (rocker_port->stp_state == state)
3828                 return 0;
3829
3830         rocker_port->stp_state = state;
3831
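             /* Pick the set of control-traffic traps wanted in the new
              * STP state; the loop below diffs want[] against the port's
              * current ctrls and installs or removes only what changed.
              */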
3832         switch (state) {
3833         case BR_STATE_DISABLED:
3834                 /* port is completely disabled */
3835                 break;
3836         case BR_STATE_LISTENING:
3837         case BR_STATE_BLOCKING:
3838                 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3839                 break;
3840         case BR_STATE_LEARNING:
3841         case BR_STATE_FORWARDING:
3842                 if (!rocker_port_is_ovsed(rocker_port))
3843                         want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3844                 want[ROCKER_CTRL_IPV4_MCAST] = true;
3845                 want[ROCKER_CTRL_IPV6_MCAST] = true;
3846                 if (rocker_port_is_bridged(rocker_port))
3847                         want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3848                 else if (rocker_port_is_ovsed(rocker_port))
3849                         want[ROCKER_CTRL_DFLT_OVS] = true;
3850                 else
3851                         want[ROCKER_CTRL_LOCAL_ARP] = true;
3852                 break;
3853         }
3854
3855         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3856                 if (want[i] != rocker_port->ctrls[i]) {
3857                         int ctrl_flags = flags |
3858                                          (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3859                         err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3860                                                &rocker_ctrls[i]);
3861                         if (err)
3862                                 goto err_out;
3863                         rocker_port->ctrls[i] = want[i];
3864                 }
3865         }
3866
3867         err = rocker_port_fdb_flush(rocker_port, trans, flags);
3868         if (err)
3869                 goto err_out;
3870
3871         err = rocker_port_fwding(rocker_port, trans, flags);
3872
3873 err_out:
3874         if (switchdev_trans_ph_prepare(trans)) {
3875                 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3876                 rocker_port->stp_state = prev_state;
3877         }
3878
3879         return err;
3880 }
3881
3882 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3883                                   struct switchdev_trans *trans, int flags)
3884 {
3885         if (rocker_port_is_bridged(rocker_port))
3886                 /* bridge STP will enable port */
3887                 return 0;
3888
3889         /* port is not bridged, so simulate going to FORWARDING state */
3890         return rocker_port_stp_update(rocker_port, trans, flags,
3891                                       BR_STATE_FORWARDING);
3892 }
3893
3894 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3895                                    struct switchdev_trans *trans, int flags)
3896 {
3897         if (rocker_port_is_bridged(rocker_port))
3898                 /* bridge STP will disable port */
3899                 return 0;
3900
3901         /* port is not bridged, so simulate going to DISABLED state */
3902         return rocker_port_stp_update(rocker_port, trans, flags,
3903                                       BR_STATE_DISABLED);
3904 }
3905
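     /* Untagged traffic is carried on an internal VLAN whose ID is
      * allocated per ifindex from a fixed pool; entries are refcounted
      * so users sharing an ifindex share one VLAN ID.
      */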
3906 static struct rocker_internal_vlan_tbl_entry *
3907 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3908 {
3909         struct rocker_internal_vlan_tbl_entry *found;
3910
3911         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3912                                entry, ifindex) {
3913                 if (found->ifindex == ifindex)
3914                         return found;
3915         }
3916
3917         return NULL;
3918 }
3919
3920 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3921                                                int ifindex)
3922 {
3923         struct rocker *rocker = rocker_port->rocker;
3924         struct rocker_internal_vlan_tbl_entry *entry;
3925         struct rocker_internal_vlan_tbl_entry *found;
3926         unsigned long lock_flags;
3927         int i;
3928
3929         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3930         if (!entry)
3931                 return 0;
3932
3933         entry->ifindex = ifindex;
3934
3935         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3936
3937         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3938         if (found) {
3939                 kfree(entry);
3940                 goto found;
3941         }
3942
3943         found = entry;
3944         hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3945
3946         for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3947                 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3948                         continue;
3949                 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3950                 goto found;
3951         }
3952
3953         netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3954
3955 found:
3956         found->ref_count++;
3957         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3958
3959         return found->vlan_id;
3960 }
3961
3962 static void
3963 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3964                                  int ifindex)
3965 {
3966         struct rocker *rocker = rocker_port->rocker;
3967         struct rocker_internal_vlan_tbl_entry *found;
3968         unsigned long lock_flags;
3969         unsigned long bit;
3970
3971         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3972
3973         found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3974         if (!found) {
3975                 netdev_err(rocker_port->dev,
3976                            "ifindex (%d) not found in internal VLAN tbl\n",
3977                            ifindex);
3978                 goto not_found;
3979         }
3980
3981         if (--found->ref_count <= 0) {
3982                 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3983                 clear_bit(bit, rocker->internal_vlan_bitmap);
3984                 hash_del(&found->entry);
3985                 kfree(found);
3986         }
3987
3988 not_found:
3989         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3990 }
3991
3992 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3993                                 struct switchdev_trans *trans, __be32 dst,
3994                                 int dst_len, const struct fib_info *fi,
3995                                 u32 tb_id, int flags)
3996 {
3997         const struct fib_nh *nh;
3998         __be16 eth_type = htons(ETH_P_IP);
3999         __be32 dst_mask = inet_make_mask(dst_len);
4000         __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4001         u32 priority = fi->fib_priority;
4002         enum rocker_of_dpa_table_id goto_tbl =
4003                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4004         u32 group_id;
4005         bool nh_on_port;
4006         bool has_gw;
4007         u32 index;
4008         int err;
4009
4010         /* XXX support ECMP */
4011
4012         nh = fi->fib_nh;
4013         nh_on_port = (fi->fib_dev == rocker_port->dev);
4014         has_gw = !!nh->nh_gw;
4015
4016         if (has_gw && nh_on_port) {
4017                 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
4018                                           nh->nh_gw, &index);
4019                 if (err)
4020                         return err;
4021
4022                 group_id = ROCKER_GROUP_L3_UNICAST(index);
4023         } else {
4024                 /* Send to CPU for processing */
4025                 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4026         }
4027
4028         err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
4029                                              dst_mask, priority, goto_tbl,
4030                                              group_id, flags);
4031         if (err)
4032                 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4033                            err, &dst);
4034
4035         return err;
4036 }
4037
4038 /*****************
4039  * Net device ops
4040  *****************/
4041
4042 static int rocker_port_open(struct net_device *dev)
4043 {
4044         struct rocker_port *rocker_port = netdev_priv(dev);
4045         int err;
4046
4047         err = rocker_port_dma_rings_init(rocker_port);
4048         if (err)
4049                 return err;
4050
4051         err = request_irq(rocker_msix_tx_vector(rocker_port),
4052                           rocker_tx_irq_handler, 0,
4053                           rocker_driver_name, rocker_port);
4054         if (err) {
4055                 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4056                 goto err_request_tx_irq;
4057         }
4058
4059         err = request_irq(rocker_msix_rx_vector(rocker_port),
4060                           rocker_rx_irq_handler, 0,
4061                           rocker_driver_name, rocker_port);
4062         if (err) {
4063                 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4064                 goto err_request_rx_irq;
4065         }
4066
4067         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
4068         if (err)
4069                 goto err_fwd_enable;
4070
4071         napi_enable(&rocker_port->napi_tx);
4072         napi_enable(&rocker_port->napi_rx);
4073         if (!dev->proto_down)
4074                 rocker_port_set_enable(rocker_port, true);
4075         netif_start_queue(dev);
4076         return 0;
4077
4078 err_fwd_enable:
4079         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4080 err_request_rx_irq:
4081         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4082 err_request_tx_irq:
4083         rocker_port_dma_rings_fini(rocker_port);
4084         return err;
4085 }
4086
4087 static int rocker_port_stop(struct net_device *dev)
4088 {
4089         struct rocker_port *rocker_port = netdev_priv(dev);
4090
4091         netif_stop_queue(dev);
4092         rocker_port_set_enable(rocker_port, false);
4093         napi_disable(&rocker_port->napi_rx);
4094         napi_disable(&rocker_port->napi_tx);
4095         rocker_port_fwd_disable(rocker_port, NULL,
4096                                 ROCKER_OP_FLAG_NOWAIT);
4097         free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4098         free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4099         rocker_port_dma_rings_fini(rocker_port);
4100
4101         return 0;
4102 }
4103
4104 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4105                                        const struct rocker_desc_info *desc_info)
4106 {
4107         const struct rocker *rocker = rocker_port->rocker;
4108         struct pci_dev *pdev = rocker->pdev;
4109         const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4110         struct rocker_tlv *attr;
4111         int rem;
4112
4113         rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4114         if (!attrs[ROCKER_TLV_TX_FRAGS])
4115                 return;
4116         rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4117                 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4118                 dma_addr_t dma_handle;
4119                 size_t len;
4120
4121                 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4122                         continue;
4123                 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4124                                         attr);
4125                 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4126                     !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4127                         continue;
4128                 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4129                 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4130                 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4131         }
4132 }
4133
static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
                                       struct rocker_desc_info *desc_info,
                                       char *buf, size_t buf_len)
{
        const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        struct rocker_tlv *frag;

        dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
        if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
                if (net_ratelimit())
                        netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
        if (!frag)
                goto unmap_frag;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
                               dma_handle))
                goto nest_cancel;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
                               buf_len))
                goto nest_cancel;
        rocker_tlv_nest_end(desc_info, frag);
        return 0;

nest_cancel:
        rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
        pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
        return -EMSGSIZE;
}

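/* Transmit path: the skb head and each page fragment are recorded as
 * TX_FRAG entries in a single descriptor. skbs carrying more than
 * ROCKER_TX_FRAGS_MAX fragments are linearized as a fallback. After
 * posting, the next descriptor head is peeked so the queue can be
 * stopped as soon as the ring fills, instead of returning
 * NETDEV_TX_BUSY on the following packet.
 */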
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        struct rocker_tlv *frags;
        int i;
        int err;

        desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
        if (unlikely(!desc_info)) {
                if (net_ratelimit())
                        netdev_err(dev, "tx ring full when queue awake\n");
                return NETDEV_TX_BUSY;
        }

        rocker_desc_cookie_ptr_set(desc_info, skb);

        frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
        if (!frags)
                goto out;
        err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
                                          skb->data, skb_headlen(skb));
        if (err)
                goto nest_cancel;
        if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
                err = skb_linearize(skb);
                if (err)
                        goto unmap_frags;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
                                                  skb_frag_address(frag),
                                                  skb_frag_size(frag));
                if (err)
                        goto unmap_frags;
        }
        rocker_tlv_nest_end(desc_info, frags);

        rocker_desc_gen_clear(desc_info);
        rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

        desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
        if (!desc_info)
                netif_stop_queue(dev);

        return NETDEV_TX_OK;

unmap_frags:
        rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
        rocker_tlv_nest_cancel(desc_info, frags);
out:
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;

        return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
        if (err)
                return err;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}

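/* MTU changes require reprogramming the port, so a running port is
 * stopped around the settings command and reopened afterwards.
 */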
static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int running = netif_running(dev);
        int err;

#define ROCKER_PORT_MIN_MTU     68
#define ROCKER_PORT_MAX_MTU     9000

        if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
                return -EINVAL;

        if (running)
                rocker_port_stop(dev);

        netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
        dev->mtu = new_mtu;

        err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
        if (err)
                return err;

        if (running)
                err = rocker_port_open(dev);

        return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
                                          char *buf, size_t len)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct port_name name = { .buf = buf, .len = len };
        int err;

        err = rocker_cmd_exec(rocker_port, NULL, 0,
                              rocker_cmd_get_port_settings_prep, NULL,
                              rocker_cmd_get_port_settings_phys_name_proc,
                              &name);

        return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
                                         bool proto_down)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        if (rocker_port->dev->flags & IFF_UP)
                rocker_port_set_enable(rocker_port, !proto_down);
        rocker_port->dev->proto_down = proto_down;
        return 0;
}

static void rocker_port_neigh_destroy(struct neighbour *n)
{
        struct rocker_port *rocker_port = netdev_priv(n->dev);
        int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
        __be32 ip_addr = *(__be32 *)n->primary_key;

        rocker_port_ipv4_neigh(rocker_port, NULL,
                               flags, ip_addr, n->ha);
}

static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
        .ndo_start_xmit                 = rocker_port_xmit,
        .ndo_set_mac_address            = rocker_port_set_mac_address,
        .ndo_change_mtu                 = rocker_port_change_mtu,
        .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
        .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
        .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
        .ndo_fdb_add                    = switchdev_port_fdb_add,
        .ndo_fdb_del                    = switchdev_port_fdb_del,
        .ndo_fdb_dump                   = switchdev_port_fdb_dump,
        .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
        .ndo_change_proto_down          = rocker_port_change_proto_down,
        .ndo_neigh_destroy              = rocker_port_neigh_destroy,
};

/********************
 * swdev interface
 ********************/

static int rocker_port_attr_get(struct net_device *dev,
                                struct switchdev_attr *attr)
{
        const struct rocker_port *rocker_port = netdev_priv(dev);
        const struct rocker *rocker = rocker_port->rocker;

        switch (attr->id) {
        case SWITCHDEV_ATTR_PORT_PARENT_ID:
                attr->u.ppid.id_len = sizeof(rocker->hw.id);
                memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
                break;
        case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
                attr->u.brport_flags = rocker_port->brport_flags;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

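/* switchdev operations arrive in two phases: PREPARE, where the driver
 * preallocates everything that could fail, and COMMIT, where the change
 * is applied using that memory. Allocations made during PREPARE are
 * queued on rocker_port->trans_mem; if the upper layer aborts instead
 * of committing, this simply frees them all.
 */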
static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
{
        struct list_head *mem, *tmp;

        list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
                list_del(mem);
                kfree(mem);
        }
}

static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
                                        struct switchdev_trans *trans,
                                        unsigned long brport_flags)
{
        unsigned long orig_flags;
        int err = 0;

        orig_flags = rocker_port->brport_flags;
        rocker_port->brport_flags = brport_flags;
        if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
                err = rocker_port_set_learning(rocker_port, trans);

        if (switchdev_trans_ph_prepare(trans))
                rocker_port->brport_flags = orig_flags;

        return err;
}

static int rocker_port_attr_set(struct net_device *dev,
                                struct switchdev_attr *attr,
                                struct switchdev_trans *trans)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err = 0;

        switch (trans->ph) {
        case SWITCHDEV_TRANS_PREPARE:
                BUG_ON(!list_empty(&rocker_port->trans_mem));
                break;
        case SWITCHDEV_TRANS_ABORT:
                rocker_port_trans_abort(rocker_port);
                return 0;
        default:
                break;
        }

        switch (attr->id) {
        case SWITCHDEV_ATTR_PORT_STP_STATE:
                err = rocker_port_stp_update(rocker_port, trans,
                                             ROCKER_OP_FLAG_NOWAIT,
                                             attr->u.stp_state);
                break;
        case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
                err = rocker_port_brport_flags_set(rocker_port, trans,
                                                   attr->u.brport_flags);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int rocker_port_vlan_add(struct rocker_port *rocker_port,
                                struct switchdev_trans *trans,
                                u16 vid, u16 flags)
{
        int err;

        /* XXX deal with flags for PVID and untagged */

        err = rocker_port_vlan(rocker_port, trans, 0, vid);
        if (err)
                return err;

        err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
        if (err)
                rocker_port_vlan(rocker_port, trans,
                                 ROCKER_OP_FLAG_REMOVE, vid);

        return err;
}

static int rocker_port_vlans_add(struct rocker_port *rocker_port,
                                 struct switchdev_trans *trans,
                                 const struct switchdev_obj_vlan *vlan)
{
        u16 vid;
        int err;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                err = rocker_port_vlan_add(rocker_port, trans,
                                           vid, vlan->flags);
                if (err)
                        return err;
        }

        return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans,
                               const struct switchdev_obj_fdb *fdb)
{
        __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
        int flags = 0;

        if (!rocker_port_is_bridged(rocker_port))
                return -EINVAL;

        return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_add(struct net_device *dev,
                               struct switchdev_obj *obj,
                               struct switchdev_trans *trans)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        const struct switchdev_obj_ipv4_fib *fib4;
        int err = 0;

        switch (trans->ph) {
        case SWITCHDEV_TRANS_PREPARE:
                BUG_ON(!list_empty(&rocker_port->trans_mem));
                break;
        case SWITCHDEV_TRANS_ABORT:
                rocker_port_trans_abort(rocker_port);
                return 0;
        default:
                break;
        }

        switch (obj->id) {
        case SWITCHDEV_OBJ_PORT_VLAN:
                err = rocker_port_vlans_add(rocker_port, trans,
                                            &obj->u.vlan);
                break;
        case SWITCHDEV_OBJ_IPV4_FIB:
                fib4 = &obj->u.ipv4_fib;
                err = rocker_port_fib_ipv4(rocker_port, trans,
                                           htonl(fib4->dst), fib4->dst_len,
                                           fib4->fi, fib4->tb_id, 0);
                break;
        case SWITCHDEV_OBJ_PORT_FDB:
                err = rocker_port_fdb_add(rocker_port, trans, &obj->u.fdb);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int rocker_port_vlan_del(struct rocker_port *rocker_port,
                                u16 vid, u16 flags)
{
        int err;

        err = rocker_port_router_mac(rocker_port, NULL,
                                     ROCKER_OP_FLAG_REMOVE, htons(vid));
        if (err)
                return err;

        return rocker_port_vlan(rocker_port, NULL,
                                ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_vlans_del(struct rocker_port *rocker_port,
                                 const struct switchdev_obj_vlan *vlan)
{
        u16 vid;
        int err;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
                if (err)
                        return err;
        }

        return 0;
}

static int rocker_port_fdb_del(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans,
                               const struct switchdev_obj_fdb *fdb)
{
        __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
        int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

        if (!rocker_port_is_bridged(rocker_port))
                return -EINVAL;

        return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_del(struct net_device *dev,
                               struct switchdev_obj *obj)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        const struct switchdev_obj_ipv4_fib *fib4;
        int err = 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_PORT_VLAN:
                err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
                break;
        case SWITCHDEV_OBJ_IPV4_FIB:
                fib4 = &obj->u.ipv4_fib;
                err = rocker_port_fib_ipv4(rocker_port, NULL,
                                           htonl(fib4->dst), fib4->dst_len,
                                           fib4->fi, fib4->tb_id,
                                           ROCKER_OP_FLAG_REMOVE);
                break;
        case SWITCHDEV_OBJ_PORT_FDB:
                err = rocker_port_fdb_del(rocker_port, NULL, &obj->u.fdb);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

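/* Dump helpers iterate driver state and hand each entry back through
 * obj->cb, the callback switchdev installed in the object. A non-zero
 * return from the callback stops the walk, e.g. when a netlink dump
 * buffer fills up.
 */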
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
                                struct switchdev_obj *obj)
{
        struct rocker *rocker = rocker_port->rocker;
        struct switchdev_obj_fdb *fdb = &obj->u.fdb;
        struct rocker_fdb_tbl_entry *found;
        struct hlist_node *tmp;
        unsigned long lock_flags;
        int bkt;
        int err = 0;

        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
                if (found->key.rocker_port != rocker_port)
                        continue;
                fdb->addr = found->key.addr;
                fdb->ndm_state = NUD_REACHABLE;
                fdb->vid = rocker_port_vlan_to_vid(rocker_port,
                                                   found->key.vlan_id);
                err = obj->cb(rocker_port->dev, obj);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

        return err;
}

static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
                                 struct switchdev_obj *obj)
{
        struct switchdev_obj_vlan *vlan = &obj->u.vlan;
        u16 vid;
        int err = 0;

        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, rocker_port->vlan_bitmap))
                        continue;
                vlan->flags = 0;
                if (rocker_vlan_id_is_internal(htons(vid)))
                        vlan->flags |= BRIDGE_VLAN_INFO_PVID;
                vlan->vid_begin = vlan->vid_end = vid;
                err = obj->cb(rocker_port->dev, obj);
                if (err)
                        break;
        }

        return err;
}

static int rocker_port_obj_dump(struct net_device *dev,
                                struct switchdev_obj *obj)
{
        const struct rocker_port *rocker_port = netdev_priv(dev);
        int err = 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_PORT_FDB:
                err = rocker_port_fdb_dump(rocker_port, obj);
                break;
        case SWITCHDEV_OBJ_PORT_VLAN:
                err = rocker_port_vlan_dump(rocker_port, obj);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static const struct switchdev_ops rocker_port_switchdev_ops = {
        .switchdev_port_attr_get        = rocker_port_attr_get,
        .switchdev_port_attr_set        = rocker_port_attr_set,
        .switchdev_port_obj_add         = rocker_port_obj_add,
        .switchdev_port_obj_del         = rocker_port_obj_del,
        .switchdev_port_obj_dump        = rocker_port_obj_dump,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
                                    struct ethtool_cmd *ecmd)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
                                    struct ethtool_cmd *ecmd)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
        char str[ETH_GSTRING_LEN];
        int type;
} rocker_port_stats[] = {
        { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
        { "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
        { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
        { "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

        { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
        { "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
        { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
        { "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
                                    u8 *data)
{
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
                        memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                break;
        }
}

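/* Stats are fetched with the usual rocker command pattern: a *_prep
 * callback builds the request TLVs into the command descriptor and a
 * *_proc callback parses the completion. The request here is just
 * CMD_TYPE = GET_PORT_STATS plus a CMD_INFO nest carrying the pport.
 * Userspace reaches this path through ethtool, e.g. (illustrative):
 *
 *   ethtool -S <rocker port netdev>
 */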
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info,
                               void *priv)
{
        struct rocker_tlv *cmd_stats;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
                return -EMSGSIZE;

        cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_stats)
                return -EMSGSIZE;

        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;

        rocker_tlv_nest_end(desc_info, cmd_stats);

        return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
                                       const struct rocker_desc_info *desc_info,
                                       void *priv)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
        const struct rocker_tlv *pattr;
        u32 pport;
        u64 *data = priv;
        int i;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

        if (!attrs[ROCKER_TLV_CMD_INFO])
                return -EIO;

        rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
                                attrs[ROCKER_TLV_CMD_INFO]);

        if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
                return -EIO;

        pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
        if (pport != rocker_port->pport)
                return -EIO;

        for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
                pattr = stats_attrs[rocker_port_stats[i].type];
                if (!pattr)
                        continue;

                data[i] = rocker_tlv_get_u64(pattr);
        }

        return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
                                             void *priv)
{
        return rocker_cmd_exec(rocker_port, NULL, 0,
                               rocker_cmd_get_port_stats_prep, NULL,
                               rocker_cmd_get_port_stats_ethtool_proc,
                               priv);
}

static void rocker_port_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
                int i;

                for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
                        data[i] = 0;
        }
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ROCKER_PORT_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
        .get_settings           = rocker_port_get_settings,
        .set_settings           = rocker_port_set_settings,
        .get_drvinfo            = rocker_port_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = rocker_port_get_strings,
        .get_ethtool_stats      = rocker_port_get_stats,
        .get_sset_count         = rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

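/* Both poll loops use a credit scheme: each consumed descriptor earns
 * one credit, and rocker_dma_ring_credits_set() returns the credits to
 * the device so it knows how many descriptors it may reuse. The RX poll
 * additionally honors the NAPI budget and only completes when it
 * drained less than a full budget.
 */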
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
        return container_of(napi, struct rocker_port, napi_tx);
}

static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
        struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
        const struct rocker *rocker = rocker_port->rocker;
        const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        /* Cleanup tx descriptors */
        while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
                struct sk_buff *skb;

                err = rocker_desc_err(desc_info);
                if (err && net_ratelimit())
                        netdev_err(rocker_port->dev, "tx desc received with err %d\n",
                                   err);
                rocker_tx_desc_frags_unmap(rocker_port, desc_info);

                skb = rocker_desc_cookie_ptr_get(desc_info);
                if (err == 0) {
                        rocker_port->dev->stats.tx_packets++;
                        rocker_port->dev->stats.tx_bytes += skb->len;
                } else {
                        rocker_port->dev->stats.tx_errors++;
                }

                dev_kfree_skb_any(skb);
                credits++;
        }

        if (credits && netif_queue_stopped(rocker_port->dev))
                netif_wake_queue(rocker_port->dev);

        napi_complete(napi);
        rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

        return 0;
}

static int rocker_port_rx_proc(const struct rocker *rocker,
                               const struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
        size_t rx_len;
        u16 rx_flags = 0;

        if (!skb)
                return -ENOENT;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
                return -EINVAL;
        if (attrs[ROCKER_TLV_RX_FLAGS])
                rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

        rocker_dma_rx_ring_skb_unmap(rocker, attrs);

        rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
        skb_put(skb, rx_len);
        skb->protocol = eth_type_trans(skb, rocker_port->dev);

        if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
                skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

        rocker_port->dev->stats.rx_packets++;
        rocker_port->dev->stats.rx_bytes += skb->len;

        netif_receive_skb(skb);

        return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
        return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
        struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
        const struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        /* Process rx descriptors */
        while (credits < budget &&
               (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        if (net_ratelimit())
                                netdev_err(rocker_port->dev, "rx desc received with err %d\n",
                                           err);
                } else {
                        err = rocker_port_rx_proc(rocker, rocker_port,
                                                  desc_info);
                        if (err && net_ratelimit())
                                netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
                                           err);
                }
                if (err)
                        rocker_port->dev->stats.rx_errors++;

                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
                credits++;
        }

        if (credits < budget)
                napi_complete(napi);

        rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

        return credits;
}

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
        const struct rocker *rocker = rocker_port->rocker;
        u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
        bool link_up;

        link_up = link_status & (1 << rocker_port->pport);
        if (link_up)
                netif_carrier_on(rocker_port->dev);
        else
                netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(const struct rocker *rocker)
{
        struct rocker_port *rocker_port;
        int i;

        for (i = 0; i < rocker->port_count; i++) {
                rocker_port = rocker->ports[i];
                if (!rocker_port)
                        continue;
                rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
                unregister_netdev(rocker_port->dev);
                free_netdev(rocker_port->dev);
        }
        kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
        const struct rocker *rocker = rocker_port->rocker;
        const struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_cmd_get_port_settings_macaddr(rocker_port,
                                                   rocker_port->dev->dev_addr);
        if (err) {
                dev_warn(&pdev->dev, "failed to get mac address, using random\n");
                eth_hw_addr_random(rocker_port->dev);
        }
}

static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
        const struct pci_dev *pdev = rocker->pdev;
        struct rocker_port *rocker_port;
        struct net_device *dev;
        u16 untagged_vid = 0;
        int err;

        dev = alloc_etherdev(sizeof(struct rocker_port));
        if (!dev)
                return -ENOMEM;
        rocker_port = netdev_priv(dev);
        rocker_port->dev = dev;
        rocker_port->rocker = rocker;
        rocker_port->port_number = port_number;
        rocker_port->pport = port_number + 1;
        rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
        rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
        INIT_LIST_HEAD(&rocker_port->trans_mem);

        rocker_port_dev_addr_init(rocker_port);
        dev->netdev_ops = &rocker_port_netdev_ops;
        dev->ethtool_ops = &rocker_port_ethtool_ops;
        dev->switchdev_ops = &rocker_port_switchdev_ops;
        netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
                       NAPI_POLL_WEIGHT);
        netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);

        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "register_netdev failed\n");
                goto err_register_netdev;
        }
        rocker->ports[port_number] = rocker_port;

        switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

        rocker_port_set_learning(rocker_port, NULL);

        err = rocker_port_ig_tbl(rocker_port, NULL, 0);
        if (err) {
                netdev_err(rocker_port->dev, "install ig port table failed\n");
                goto err_port_ig_tbl;
        }

        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

        err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
        if (err) {
                netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
                goto err_untagged_vlan;
        }

        return 0;

err_untagged_vlan:
        rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
        rocker->ports[port_number] = NULL;
        unregister_netdev(dev);
err_register_netdev:
        free_netdev(dev);
        return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
        int i;
        size_t alloc_size;
        int err;

        alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
        rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!rocker->ports)
                return -ENOMEM;
        for (i = 0; i < rocker->port_count; i++) {
                err = rocker_probe_port(rocker, i);
                if (err)
                        goto remove_ports;
        }
        return 0;

remove_ports:
        rocker_remove_ports(rocker);
        return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
        struct pci_dev *pdev = rocker->pdev;
        int msix_entries;
        int i;
        int err;

        msix_entries = pci_msix_vec_count(pdev);
        if (msix_entries < 0)
                return msix_entries;

        if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
                return -EINVAL;

        rocker->msix_entries = kmalloc_array(msix_entries,
                                             sizeof(struct msix_entry),
                                             GFP_KERNEL);
        if (!rocker->msix_entries)
                return -ENOMEM;

        for (i = 0; i < msix_entries; i++)
                rocker->msix_entries[i].entry = i;

        err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
        if (err < 0)
                goto err_enable_msix;

        return 0;

err_enable_msix:
        kfree(rocker->msix_entries);
        return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
        pci_disable_msix(rocker->pdev);
        kfree(rocker->msix_entries);
}

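/* Bring-up order: PCI/DMA plumbing, MSI-X, a basic register/DMA sanity
 * test, device reset, DMA rings and IRQs, software tables, the FDB
 * cleanup timer, and finally the per-port netdevs. The error path
 * unwinds in exactly the reverse order.
 */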
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct rocker *rocker;
        int err;

        rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
        if (!rocker)
                return -ENOMEM;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_device failed\n");
                goto err_pci_enable_device;
        }

        err = pci_request_regions(pdev, rocker_driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto err_pci_request_regions;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        }

        if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
                dev_err(&pdev->dev, "invalid PCI region size\n");
                err = -EINVAL;
                goto err_pci_resource_len_check;
        }

        rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
                                  pci_resource_len(pdev, 0));
        if (!rocker->hw_addr) {
                dev_err(&pdev->dev, "ioremap failed\n");
                err = -EIO;
                goto err_ioremap;
        }
        pci_set_master(pdev);

        rocker->pdev = pdev;
        pci_set_drvdata(pdev, rocker);

        rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

        err = rocker_msix_init(rocker);
        if (err) {
                dev_err(&pdev->dev, "MSI-X init failed\n");
                goto err_msix_init;
        }

        err = rocker_basic_hw_test(rocker);
        if (err) {
                dev_err(&pdev->dev, "basic hw test failed\n");
                goto err_basic_hw_test;
        }

        rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

        err = rocker_dma_rings_init(rocker);
        if (err)
                goto err_dma_rings_init;

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
                          rocker_cmd_irq_handler, 0,
                          rocker_driver_name, rocker);
        if (err) {
                dev_err(&pdev->dev, "cannot assign cmd irq\n");
                goto err_request_cmd_irq;
        }

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
                          rocker_event_irq_handler, 0,
                          rocker_driver_name, rocker);
        if (err) {
                dev_err(&pdev->dev, "cannot assign event irq\n");
                goto err_request_event_irq;
        }

        rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

        err = rocker_init_tbls(rocker);
        if (err) {
                dev_err(&pdev->dev, "cannot init rocker tables\n");
                goto err_init_tbls;
        }

        setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
                    (unsigned long) rocker);
        mod_timer(&rocker->fdb_cleanup_timer, jiffies);

        err = rocker_probe_ports(rocker);
        if (err) {
                dev_err(&pdev->dev, "failed to probe ports\n");
                goto err_probe_ports;
        }

        dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
                 (int)sizeof(rocker->hw.id), &rocker->hw.id);

        return 0;

err_probe_ports:
        del_timer_sync(&rocker->fdb_cleanup_timer);
        rocker_free_tbls(rocker);
err_init_tbls:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
        rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
        rocker_msix_fini(rocker);
err_msix_init:
        iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
        pci_release_regions(pdev);
err_pci_request_regions:
        pci_disable_device(pdev);
err_pci_enable_device:
        kfree(rocker);
        return err;
}

static void rocker_remove(struct pci_dev *pdev)
{
        struct rocker *rocker = pci_get_drvdata(pdev);

        del_timer_sync(&rocker->fdb_cleanup_timer);
        rocker_free_tbls(rocker);
        rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
        rocker_remove_ports(rocker);
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
        rocker_dma_rings_fini(rocker);
        rocker_msix_fini(rocker);
        iounmap(rocker->hw_addr);
        pci_release_regions(rocker->pdev);
        pci_disable_device(rocker->pdev);
        kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
        .name           = rocker_driver_name,
        .id_table       = rocker_pci_id_table,
        .probe          = rocker_probe,
        .remove         = rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

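/* A netdev is recognized as a rocker port purely by its ops pointer;
 * no private marker is needed since the ops structure is unique to
 * this driver.
 */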
static bool rocker_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int rocker_port_bridge_join(struct rocker_port *rocker_port,
                                   struct net_device *bridge)
{
        u16 untagged_vid = 0;
        int err;

        /* The port is joining a bridge, so its internal VLAN is
         * about to change to the bridge's internal VLAN. Remove
         * the untagged VLAN (vid=0) from the port and re-add it
         * once the internal VLAN has changed.
         */

        err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
        if (err)
                return err;

        rocker_port_internal_vlan_id_put(rocker_port,
                                         rocker_port->dev->ifindex);
        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

        rocker_port->bridge_dev = bridge;
        switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

        return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}

static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
        u16 untagged_vid = 0;
        int err;

        err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
        if (err)
                return err;

        rocker_port_internal_vlan_id_put(rocker_port,
                                         rocker_port->bridge_dev->ifindex);
        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port,
                                                 rocker_port->dev->ifindex);

        switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
                                    false);
        rocker_port->bridge_dev = NULL;

        err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
        if (err)
                return err;

        if (rocker_port->dev->flags & IFF_UP)
                err = rocker_port_fwd_enable(rocker_port, NULL, 0);

        return err;
}

static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
                                   struct net_device *master)
{
        int err;

        rocker_port->bridge_dev = master;

        err = rocker_port_fwd_disable(rocker_port, NULL, 0);
        if (err)
                return err;
        err = rocker_port_fwd_enable(rocker_port, NULL, 0);

        return err;
}

static int rocker_port_master_linked(struct rocker_port *rocker_port,
                                     struct net_device *master)
{
        int err = 0;

        if (netif_is_bridge_master(master))
                err = rocker_port_bridge_join(rocker_port, master);
        else if (netif_is_ovs_master(master))
                err = rocker_port_ovs_changed(rocker_port, master);
        return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
        int err = 0;

        if (rocker_port_is_bridged(rocker_port))
                err = rocker_port_bridge_leave(rocker_port);
        else if (rocker_port_is_ovsed(rocker_port))
                err = rocker_port_ovs_changed(rocker_port, NULL);
        return err;
}

static int rocker_netdevice_event(struct notifier_block *unused,
                                  unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info;
        struct rocker_port *rocker_port;
        int err;

        if (!rocker_port_dev_check(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                info = ptr;
                if (!info->master)
                        goto out;
                rocker_port = netdev_priv(dev);
                if (info->linking) {
                        err = rocker_port_master_linked(rocker_port,
                                                        info->upper_dev);
                        if (err)
                                netdev_warn(dev, "failed to reflect master linked (err %d)\n",
                                            err);
                } else {
                        err = rocker_port_master_unlinked(rocker_port);
                        if (err)
                                netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
                                            err);
                }
                break;
        }
out:
        return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
        .notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
                    ROCKER_OP_FLAG_NOWAIT;
        __be32 ip_addr = *(__be32 *)n->primary_key;

        return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}

static int rocker_netevent_event(struct notifier_block *unused,
                                 unsigned long event, void *ptr)
{
        struct net_device *dev;
        struct neighbour *n = ptr;
        int err;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                if (n->tbl != &arp_tbl)
                        return NOTIFY_DONE;
                dev = n->dev;
                if (!rocker_port_dev_check(dev))
                        return NOTIFY_DONE;
                err = rocker_neigh_update(dev, n);
                if (err)
                        netdev_warn(dev,
                                    "failed to handle neigh update (err %d)\n",
                                    err);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
        .notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

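/* The notifiers are registered before the PCI driver, presumably so
 * that bridge and neighbour events observed while ports are probing
 * are not missed; the error path unregisters them in reverse.
 */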
static int __init rocker_module_init(void)
{
        int err;

        register_netdevice_notifier(&rocker_netdevice_nb);
        register_netevent_notifier(&rocker_netevent_nb);
        err = pci_register_driver(&rocker_pci_driver);
        if (err)
                goto err_pci_register_driver;
        return 0;

err_pci_register_driver:
        unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        return err;
}

static void __exit rocker_module_exit(void)
{
        unregister_netevent_notifier(&rocker_netevent_nb);
        unregister_netdevice_notifier(&rocker_netdevice_nb);
        pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);