rocker: Implement FIB offload in deferred work
author    Ido Schimmel <idosch@mellanox.com>
Sat, 3 Dec 2016 15:45:03 +0000 (16:45 +0100)
committer David S. Miller <davem@davemloft.net>
Sun, 4 Dec 2016 00:29:35 +0000 (19:29 -0500)
Convert rocker to offload FIBs in deferred work in a similar fashion to
mlxsw, which was converted in the previous commits.

Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
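
A short, illustrative sketch of the pattern the diff below introduces may help. It uses hypothetical foo_* names rather than the driver's own symbols (the real code operates on struct rocker and its rocker_owq workqueue): the FIB notifier is called in atomic context, so it only copies the notifier info, takes a reference on the fib_info and queues a work item; the work item later updates the hardware under rtnl_lock and drops the reference.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/ip_fib.h>

/* Hypothetical driver context; stands in for struct rocker. */
struct foo {
        struct notifier_block fib_nb;
        struct workqueue_struct *owq;
};

struct foo_fib_event_work {
        struct work_struct work;
        struct fib_entry_notifier_info fen_info;
        struct foo *foo;
        unsigned long event;
};

static void foo_fib_event_work(struct work_struct *work)
{
        struct foo_fib_event_work *fib_work =
                container_of(work, struct foo_fib_event_work, work);

        rtnl_lock();    /* protect driver structures, as in the patch */
        /* switch (fib_work->event) and program/remove the route here */
        fib_info_put(fib_work->fen_info.fi);    /* drop the ref taken below */
        rtnl_unlock();
        kfree(fib_work);
}

/* The notifier runs under rcu_read_lock() and must not sleep: copy the
 * event data, take a reference on the fib_info and defer the rest.
 */
static int foo_fib_event(struct notifier_block *nb, unsigned long event,
                         void *ptr)
{
        struct foo *foo = container_of(nb, struct foo, fib_nb);
        struct foo_fib_event_work *fib_work;

        fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
        if (!fib_work)
                return NOTIFY_BAD;

        INIT_WORK(&fib_work->work, foo_fib_event_work);
        fib_work->foo = foo;
        fib_work->event = event;
        /* Assumes an ENTRY_ADD/DEL event; rule events carry no fib_info. */
        memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
        fib_info_hold(fib_work->fen_info.fi);

        queue_work(foo->owq, &fib_work->work);
        return NOTIFY_DONE;
}

The flush_workqueue() call added to ofdpa_fini() in the second hunk ensures that no queued FIB work is still outstanding when the driver's internal tables are torn down.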

diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 424be969da3f3087a54fc51b2fc0bfc78db8d5f3..914e9e1b01ad9046db2a9a72c3de8a0412861864 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2166,28 +2166,70 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
        .switchdev_port_obj_dump        = rocker_port_obj_dump,
 };
 
-static int rocker_router_fib_event(struct notifier_block *nb,
-                                  unsigned long event, void *ptr)
+struct rocker_fib_event_work {
+       struct work_struct work;
+       struct fib_entry_notifier_info fen_info;
+       struct rocker *rocker;
+       unsigned long event;
+};
+
+static void rocker_router_fib_event_work(struct work_struct *work)
 {
-       struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
-       struct fib_entry_notifier_info *fen_info = ptr;
+       struct rocker_fib_event_work *fib_work =
+               container_of(work, struct rocker_fib_event_work, work);
+       struct rocker *rocker = fib_work->rocker;
        int err;
 
-       switch (event) {
+       /* Protect internal structures from changes */
+       rtnl_lock();
+       switch (fib_work->event) {
        case FIB_EVENT_ENTRY_ADD:
-               err = rocker_world_fib4_add(rocker, fen_info);
+               err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
                if (err)
                        rocker_world_fib4_abort(rocker);
-               else
+               fib_info_put(fib_work->fen_info.fi);
                break;
        case FIB_EVENT_ENTRY_DEL:
-               rocker_world_fib4_del(rocker, fen_info);
+               rocker_world_fib4_del(rocker, &fib_work->fen_info);
+               fib_info_put(fib_work->fen_info.fi);
                break;
        case FIB_EVENT_RULE_ADD: /* fall through */
        case FIB_EVENT_RULE_DEL:
                rocker_world_fib4_abort(rocker);
                break;
        }
+       rtnl_unlock();
+       kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int rocker_router_fib_event(struct notifier_block *nb,
+                                  unsigned long event, void *ptr)
+{
+       struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
+       struct rocker_fib_event_work *fib_work;
+
+       fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+       if (WARN_ON(!fib_work))
+               return NOTIFY_BAD;
+
+       INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
+       fib_work->rocker = rocker;
+       fib_work->event = event;
+
+       switch (event) {
+       case FIB_EVENT_ENTRY_ADD: /* fall through */
+       case FIB_EVENT_ENTRY_DEL:
+               memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
+               /* Take reference on fib_info to prevent it from being
+                * freed while work is queued. Release it afterwards.
+                */
+               fib_info_hold(fib_work->fen_info.fi);
+               break;
+       }
+
+       queue_work(rocker->rocker_owq, &fib_work->work);
+
        return NOTIFY_DONE;
 }
 
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca461322d6089553f05a623823f6e92cbe57455..7cd76b6b5cb9f6c1c05f09b509be7e11a79b0478 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2516,6 +2516,7 @@ static void ofdpa_fini(struct rocker *rocker)
        int bkt;
 
        del_timer_sync(&ofdpa->fdb_cleanup_timer);
+       flush_workqueue(rocker->rocker_owq);
 
        spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
        hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)