drivers/infiniband/core/roce_gid_mgmt.c
/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct              work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

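/* Apply a single GID add/del operation to the device's GID cache. */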
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        switch (gid_op) {
        case GID_ADD:
                ib_cache_gid_add(ib_dev, port, gid, gid_attr);
                break;
        case GID_DEL:
                ib_cache_gid_del(ib_dev, port, gid, gid_attr);
                break;
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

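/* Classify @dev relative to the bond @upper: active slave, inactive slave,
 * or N/A when @upper is not a bond master or it has no active slave.
 * Must be called under rcu_read_lock().
 */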
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

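/* Check whether @upper appears anywhere in @dev's tree of upper devices.
 * Must be called under rcu_read_lock().
 */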
static bool is_upper_dev_rcu(struct net_device *dev, struct net_device *upper)
{
        struct net_device *_upper = NULL;
        struct list_head *iter;

        netdev_for_each_all_upper_dev_rcu(dev, _upper, iter)
                if (_upper == upper)
                        break;

        return _upper == upper;
}

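/* Filter: does the event netdev expose IPs through this RoCE port? True
 * when the event netdev is the port's own netdev, or an upper device of
 * it while the port is either the active bonding slave or not a slave.
 */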
#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(event_ndev);
        if (!real_dev)
                real_dev = event_ndev;

        res = ((is_upper_dev_rcu(rdma_ndev, event_ndev) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

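/* Filter: matches ports whose netdev is currently an inactive bonding
 * slave.
 */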
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == event_ndev)
                return 1;

        rcu_read_lock();
        res = is_upper_dev_rcu(rdma_ndev, event_ndev);
        rcu_read_unlock();

        return res;
}

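/* Convert an IP address to a GID and apply the add/del to the cache. */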
static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

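/* Set the port's default GIDs, unless the event netdev is unrelated to
 * the port or the port's netdev is an inactive bonding slave.
 */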
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

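/* Delete the port's default GIDs if its netdev is an inactive slave of
 * the bond behind @event_ndev (or of @event_ndev's real device when it
 * is a VLAN).
 */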
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                rcu_read_unlock();

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

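/* Add a GID for each IPv4 address configured on @ndev. */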
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in_dev = in_dev_get(ndev);
        if (!in_dev)
                return;

        for_ifa(in_dev) {
                struct sockaddr_in ip;

                ip.sin_family = AF_INET;
                ip.sin_addr.s_addr = ifa->ifa_address;
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&ip);
        }
        endfor_ifa(in_dev);

        in_dev_put(in_dev);
}

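/* Add a GID for each IPv6 address configured on @ndev. Addresses are
 * first copied to a local list under the inet6_dev lock, so the GID
 * cache updates run without holding the address-list lock.
 */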
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry) {
                        pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
                        continue;
                }

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;

        enum_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
        _add_netdev_ips(ib_dev, port, event_ndev);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;

        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        rtnl_unlock();
}

/* This function will rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices. */
int roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);

        return 0;
}

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        update_gid(parsed->gid_op, device,
                   port, &parsed->gid,
                   &parsed->gid_attr);
}

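/* Invoke @handle_netdev on @ndev and on each of its upper devices. The
 * uppers are collected and referenced under RCU first, so the handlers
 * run outside the RCU read-side critical section.
 */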
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = (struct net_device *)cookie;
        struct upper_list {
                struct list_head list;
                struct net_device *upper;
        };
        struct net_device *upper;
        struct list_head *iter;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_for_each_all_upper_dev_rcu(ndev, upper, iter) {
                struct upper_list *entry = kmalloc(sizeof(*entry),
                                                   GFP_ATOMIC);

                if (!entry) {
                        pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
                        continue;
                }

                list_add_tail(&entry->list, &upper_list);
                dev_hold(upper);
                entry->upper = upper;
        }
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = (struct net_device *)cookie;

        bond_delete_netdev_default_gids(ib_dev, port, event_ndev, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event run ib_enum_all_roce_netdevs from a work item;
 * ib_enum_all_roce_netdevs iterates over all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

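/* Copy @cmds into a freshly allocated work item, default each command's
 * ndev/filter_ndev to the event's @ndev (taking references), and queue
 * it on ib_wq.
 */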
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work) {
                pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
                return NOTIFY_DONE;
        }

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(ib_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

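/* Build the command list for a CHANGEUPPER event: on unlink, delete the
 * GIDs that belonged to the former upper device and re-add the netdev's
 * own; on link, drop default GIDs from inactive slaves and add the IPs
 * of the new upper device and its uppers.
 */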
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

        if (!changeupper_info->linking) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

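/* Netdevice notifier: translate each event into up to
 * ROCE_NETDEV_CALLBACK_SZ (filter, callback) commands and queue them to
 * run against all RoCE devices.
 */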
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

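/* Common handler for the inet/inet6 address notifiers: map NETDEV_UP and
 * NETDEV_DOWN to a GID add/del and queue a work item that applies it on
 * every matching RoCE port.
 */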
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
                pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
                return NOTIFY_DONE;
        }

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev   = ndev;

        queue_work(ib_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all existing
         * devices in the system. Register to this notifier last to
         * make sure we will not miss any IP add/del callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to freed memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
}