net/batman-adv/translation-table.c
1 /*
2  * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "hard-interface.h"
26 #include "send.h"
27 #include "hash.h"
28 #include "originator.h"
29 #include "routing.h"
30 #include "bridge_loop_avoidance.h"
31
32 #include <linux/crc16.h>
33
34 static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
35                           struct orig_node *orig_node);
36 static void tt_purge(struct work_struct *work);
37 static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
38
39 /* returns 1 if they are the same mac addr */
40 static int compare_tt(const struct hlist_node *node, const void *data2)
41 {
42         const void *data1 = container_of(node, struct tt_common_entry,
43                                          hash_entry);
44
45         return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
46 }
47
48 static void tt_start_timer(struct bat_priv *bat_priv)
49 {
50         INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
51         queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
52                            msecs_to_jiffies(5000));
53 }
54
55 static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
56                                             const void *data)
57 {
58         struct hlist_head *head;
59         struct hlist_node *node;
60         struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
61         uint32_t index;
62
63         if (!hash)
64                 return NULL;
65
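            /* map the client address to its bucket; choose_orig() is the same
             * hash function passed to hash_add() when entries are inserted */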
66         index = choose_orig(data, hash->size);
67         head = &hash->table[index];
68
69         rcu_read_lock();
70         hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
71                 if (!compare_eth(tt_common_entry, data))
72                         continue;
73
74                 if (!atomic_inc_not_zero(&tt_common_entry->refcount))
75                         continue;
76
77                 tt_common_entry_tmp = tt_common_entry;
78                 break;
79         }
80         rcu_read_unlock();
81
82         return tt_common_entry_tmp;
83 }
84
85 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
86                                                  const void *data)
87 {
88         struct tt_common_entry *tt_common_entry;
89         struct tt_local_entry *tt_local_entry = NULL;
90
91         tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
92         if (tt_common_entry)
93                 tt_local_entry = container_of(tt_common_entry,
94                                               struct tt_local_entry, common);
95         return tt_local_entry;
96 }
97
98 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
99                                                    const void *data)
100 {
101         struct tt_common_entry *tt_common_entry;
102         struct tt_global_entry *tt_global_entry = NULL;
103
104         tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
105         if (tt_common_entry)
106                 tt_global_entry = container_of(tt_common_entry,
107                                                struct tt_global_entry, common);
108         return tt_global_entry;
109
110 }
111
112 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
113 {
114         if (atomic_dec_and_test(&tt_local_entry->common.refcount))
115                 kfree_rcu(tt_local_entry, common.rcu);
116 }
117
118 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
119 {
120         struct tt_common_entry *tt_common_entry;
121         struct tt_global_entry *tt_global_entry;
122
123         tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
124         tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
125                                        common);
126
127         kfree(tt_global_entry);
128 }
129
130 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
131 {
132         if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
133                 tt_global_del_orig_list(tt_global_entry);
134                 call_rcu(&tt_global_entry->common.rcu,
135                          tt_global_entry_free_rcu);
136         }
137 }
138
139 static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
140 {
141         struct tt_orig_list_entry *orig_entry;
142
143         orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
144         atomic_dec(&orig_entry->orig_node->tt_size);
145         orig_node_free_ref(orig_entry->orig_node);
146         kfree(orig_entry);
147 }
148
149 static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
150 {
151         call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
152 }
153
154 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
155                            uint8_t flags)
156 {
157         struct tt_change_node *tt_change_node;
158
159         tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
160
161         if (!tt_change_node)
162                 return;
163
164         tt_change_node->change.flags = flags;
165         memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
166
167         spin_lock_bh(&bat_priv->tt_changes_list_lock);
168         /* track the change in the OGM interval list */
169         list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
170         atomic_inc(&bat_priv->tt_local_changes);
171         spin_unlock_bh(&bat_priv->tt_changes_list_lock);
172
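            /* reset the OGM append counter so that the updated change set is
             * advertised again with the following OGMs */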
173         atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
174 }
175
176 int tt_len(int changes_num)
177 {
178         return changes_num * sizeof(struct tt_change);
179 }
180
181 static int tt_local_init(struct bat_priv *bat_priv)
182 {
183         if (bat_priv->tt_local_hash)
184                 return 1;
185
186         bat_priv->tt_local_hash = hash_new(1024);
187
188         if (!bat_priv->tt_local_hash)
189                 return 0;
190
191         return 1;
192 }
193
194 void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
195                   int ifindex)
196 {
197         struct bat_priv *bat_priv = netdev_priv(soft_iface);
198         struct tt_local_entry *tt_local_entry = NULL;
199         struct tt_global_entry *tt_global_entry = NULL;
200         struct hlist_head *head;
201         struct hlist_node *node;
202         struct tt_orig_list_entry *orig_entry;
203         int hash_added;
204
205         tt_local_entry = tt_local_hash_find(bat_priv, addr);
206
207         if (tt_local_entry) {
208                 tt_local_entry->last_seen = jiffies;
209                 goto out;
210         }
211
212         tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
213         if (!tt_local_entry)
214                 goto out;
215
216         bat_dbg(DBG_TT, bat_priv,
217                 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
218                 (uint8_t)atomic_read(&bat_priv->ttvn));
219
220         memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
221         tt_local_entry->common.flags = NO_FLAGS;
222         if (is_wifi_iface(ifindex))
223                 tt_local_entry->common.flags |= TT_CLIENT_WIFI;
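            /* two references: one for the hash table, one for the local
             * pointer released at the out: label below */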
224         atomic_set(&tt_local_entry->common.refcount, 2);
225         tt_local_entry->last_seen = jiffies;
226
227         /* the batman interface mac address should never be purged */
228         if (compare_eth(addr, soft_iface->dev_addr))
229                 tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
230
231         /* The local entry has to be marked as NEW to avoid sending it in
232          * a full table response going out before the next ttvn increment
233          * (consistency check) */
234         tt_local_entry->common.flags |= TT_CLIENT_NEW;
235
236         hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
237                          &tt_local_entry->common,
238                          &tt_local_entry->common.hash_entry);
239
240         if (unlikely(hash_added != 0)) {
241                 /* remove the reference for the hash */
242                 tt_local_entry_free_ref(tt_local_entry);
243                 goto out;
244         }
245
246         tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
247
248         /* remove address from global hash if present */
249         tt_global_entry = tt_global_hash_find(bat_priv, addr);
250
251         /* Check whether this is a roaming client */
252         if (tt_global_entry) {
253                 /* These nodes are probably going to update their tt table */
254                 head = &tt_global_entry->orig_list;
255                 rcu_read_lock();
256                 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
257                         orig_entry->orig_node->tt_poss_change = true;
258
259                         send_roam_adv(bat_priv, tt_global_entry->common.addr,
260                                       orig_entry->orig_node);
261                 }
262                 rcu_read_unlock();
263                 /* The global entry has to be marked as ROAMING and
264                  * has to be kept for consistency purposes
265                  */
266                 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
267                 tt_global_entry->roam_at = jiffies;
268         }
269 out:
270         if (tt_local_entry)
271                 tt_local_entry_free_ref(tt_local_entry);
272         if (tt_global_entry)
273                 tt_global_entry_free_ref(tt_global_entry);
274 }
275
276 int tt_changes_fill_buffer(struct bat_priv *bat_priv,
277                            unsigned char *buff, int buff_len)
278 {
279         int count = 0, tot_changes = 0;
280         struct tt_change_node *entry, *safe;
281
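            /* number of tt_change entries that fit into the caller's buffer */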
282         if (buff_len > 0)
283                 tot_changes = buff_len / tt_len(1);
284
285         spin_lock_bh(&bat_priv->tt_changes_list_lock);
286         atomic_set(&bat_priv->tt_local_changes, 0);
287
288         list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
289                                  list) {
290                 if (count < tot_changes) {
291                         memcpy(buff + tt_len(count),
292                                &entry->change, sizeof(struct tt_change));
293                         count++;
294                 }
295                 list_del(&entry->list);
296                 kfree(entry);
297         }
298         spin_unlock_bh(&bat_priv->tt_changes_list_lock);
299
300         /* Keep the buffer for possible tt_request */
301         spin_lock_bh(&bat_priv->tt_buff_lock);
302         kfree(bat_priv->tt_buff);
303         bat_priv->tt_buff_len = 0;
304         bat_priv->tt_buff = NULL;
305         /* We check whether this new OGM carries no changes because of
306          * size constraints */
307         if (buff_len > 0) {
308                 /*
309                  * if kmalloc() fails we will reply with the full table
310                  * instead of providing the diff
311                  */
312                 bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
313                 if (bat_priv->tt_buff) {
314                         memcpy(bat_priv->tt_buff, buff, buff_len);
315                         bat_priv->tt_buff_len = buff_len;
316                 }
317         }
318         spin_unlock_bh(&bat_priv->tt_buff_lock);
319
320         return tot_changes;
321 }
322
323 int tt_local_seq_print_text(struct seq_file *seq, void *offset)
324 {
325         struct net_device *net_dev = (struct net_device *)seq->private;
326         struct bat_priv *bat_priv = netdev_priv(net_dev);
327         struct hashtable_t *hash = bat_priv->tt_local_hash;
328         struct tt_common_entry *tt_common_entry;
329         struct hard_iface *primary_if;
330         struct hlist_node *node;
331         struct hlist_head *head;
332         uint32_t i;
333         int ret = 0;
334
335         primary_if = primary_if_get_selected(bat_priv);
336         if (!primary_if) {
337                 ret = seq_printf(seq,
338                                  "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
339                                  net_dev->name);
340                 goto out;
341         }
342
343         if (primary_if->if_status != IF_ACTIVE) {
344                 ret = seq_printf(seq,
345                                  "BATMAN mesh %s disabled - primary interface not active\n",
346                                  net_dev->name);
347                 goto out;
348         }
349
350         seq_printf(seq,
351                    "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
352                    net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
353
354         for (i = 0; i < hash->size; i++) {
355                 head = &hash->table[i];
356
357                 rcu_read_lock();
358                 hlist_for_each_entry_rcu(tt_common_entry, node,
359                                          head, hash_entry) {
360                         seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
361                                    tt_common_entry->addr,
362                                    (tt_common_entry->flags &
363                                     TT_CLIENT_ROAM ? 'R' : '.'),
364                                    (tt_common_entry->flags &
365                                     TT_CLIENT_NOPURGE ? 'P' : '.'),
366                                    (tt_common_entry->flags &
367                                     TT_CLIENT_NEW ? 'N' : '.'),
368                                    (tt_common_entry->flags &
369                                     TT_CLIENT_PENDING ? 'X' : '.'),
370                                    (tt_common_entry->flags &
371                                     TT_CLIENT_WIFI ? 'W' : '.'));
372                 }
373                 rcu_read_unlock();
374         }
375 out:
376         if (primary_if)
377                 hardif_free_ref(primary_if);
378         return ret;
379 }
380
381 static void tt_local_set_pending(struct bat_priv *bat_priv,
382                                  struct tt_local_entry *tt_local_entry,
383                                  uint16_t flags, const char *message)
384 {
385         tt_local_event(bat_priv, tt_local_entry->common.addr,
386                        tt_local_entry->common.flags | flags);
387
388         /* The local client has to be marked as "pending to be removed" but has
389          * to be kept in the table in order to send it in a full table
390          * response issued before the next ttvn increment (consistency check) */
391         tt_local_entry->common.flags |= TT_CLIENT_PENDING;
392
393         bat_dbg(DBG_TT, bat_priv,
394                 "Local tt entry (%pM) pending to be removed: %s\n",
395                 tt_local_entry->common.addr, message);
396 }
397
398 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
399                      const char *message, bool roaming)
400 {
401         struct tt_local_entry *tt_local_entry = NULL;
402
403         tt_local_entry = tt_local_hash_find(bat_priv, addr);
404         if (!tt_local_entry)
405                 goto out;
406
407         tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
408                              (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
409 out:
410         if (tt_local_entry)
411                 tt_local_entry_free_ref(tt_local_entry);
412 }
413
414 static void tt_local_purge(struct bat_priv *bat_priv)
415 {
416         struct hashtable_t *hash = bat_priv->tt_local_hash;
417         struct tt_local_entry *tt_local_entry;
418         struct tt_common_entry *tt_common_entry;
419         struct hlist_node *node, *node_tmp;
420         struct hlist_head *head;
421         spinlock_t *list_lock; /* protects write access to the hash lists */
422         uint32_t i;
423
424         for (i = 0; i < hash->size; i++) {
425                 head = &hash->table[i];
426                 list_lock = &hash->list_locks[i];
427
428                 spin_lock_bh(list_lock);
429                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
430                                           head, hash_entry) {
431                         tt_local_entry = container_of(tt_common_entry,
432                                                       struct tt_local_entry,
433                                                       common);
434                         if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
435                                 continue;
436
437                         /* entry already marked for deletion */
438                         if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
439                                 continue;
440
441                         if (!has_timed_out(tt_local_entry->last_seen,
442                                            TT_LOCAL_TIMEOUT))
443                                 continue;
444
445                         tt_local_set_pending(bat_priv, tt_local_entry,
446                                              TT_CLIENT_DEL, "timed out");
447                 }
448                 spin_unlock_bh(list_lock);
449         }
450
451 }
452
453 static void tt_local_table_free(struct bat_priv *bat_priv)
454 {
455         struct hashtable_t *hash;
456         spinlock_t *list_lock; /* protects write access to the hash lists */
457         struct tt_common_entry *tt_common_entry;
458         struct tt_local_entry *tt_local_entry;
459         struct hlist_node *node, *node_tmp;
460         struct hlist_head *head;
461         uint32_t i;
462
463         if (!bat_priv->tt_local_hash)
464                 return;
465
466         hash = bat_priv->tt_local_hash;
467
468         for (i = 0; i < hash->size; i++) {
469                 head = &hash->table[i];
470                 list_lock = &hash->list_locks[i];
471
472                 spin_lock_bh(list_lock);
473                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
474                                           head, hash_entry) {
475                         hlist_del_rcu(node);
476                         tt_local_entry = container_of(tt_common_entry,
477                                                       struct tt_local_entry,
478                                                       common);
479                         tt_local_entry_free_ref(tt_local_entry);
480                 }
481                 spin_unlock_bh(list_lock);
482         }
483
484         hash_destroy(hash);
485
486         bat_priv->tt_local_hash = NULL;
487 }
488
489 static int tt_global_init(struct bat_priv *bat_priv)
490 {
491         if (bat_priv->tt_global_hash)
492                 return 1;
493
494         bat_priv->tt_global_hash = hash_new(1024);
495
496         if (!bat_priv->tt_global_hash)
497                 return 0;
498
499         return 1;
500 }
501
502 static void tt_changes_list_free(struct bat_priv *bat_priv)
503 {
504         struct tt_change_node *entry, *safe;
505
506         spin_lock_bh(&bat_priv->tt_changes_list_lock);
507
508         list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
509                                  list) {
510                 list_del(&entry->list);
511                 kfree(entry);
512         }
513
514         atomic_set(&bat_priv->tt_local_changes, 0);
515         spin_unlock_bh(&bat_priv->tt_changes_list_lock);
516 }
517
518 /* find out if an orig_node is already in the list of a tt_global_entry.
519  * returns true if found, false otherwise
520  */
521 static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
522                                      const struct orig_node *orig_node)
523 {
524         struct tt_orig_list_entry *tmp_orig_entry;
525         const struct hlist_head *head;
526         struct hlist_node *node;
527         bool found = false;
528
529         rcu_read_lock();
530         head = &entry->orig_list;
531         hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
532                 if (tmp_orig_entry->orig_node == orig_node) {
533                         found = true;
534                         break;
535                 }
536         }
537         rcu_read_unlock();
538         return found;
539 }
540
541 static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
542                                      struct orig_node *orig_node,
543                                      int ttvn)
544 {
545         struct tt_orig_list_entry *orig_entry;
546
547         orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
548         if (!orig_entry)
549                 return;
550
551         INIT_HLIST_NODE(&orig_entry->list);
552         atomic_inc(&orig_node->refcount);
553         atomic_inc(&orig_node->tt_size);
554         orig_entry->orig_node = orig_node;
555         orig_entry->ttvn = ttvn;
556
557         spin_lock_bh(&tt_global_entry->list_lock);
558         hlist_add_head_rcu(&orig_entry->list,
559                            &tt_global_entry->orig_list);
560         spin_unlock_bh(&tt_global_entry->list_lock);
561 }
562
563 /* caller must hold orig_node refcount */
564 int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
565                   const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
566                   bool wifi)
567 {
568         struct tt_global_entry *tt_global_entry = NULL;
569         int ret = 0;
570         int hash_added;
571
572         tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
573
574         if (!tt_global_entry) {
575                 tt_global_entry = kzalloc(sizeof(*tt_global_entry),
576                                           GFP_ATOMIC);
577                 if (!tt_global_entry)
578                         goto out;
579
580                 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
581
582                 tt_global_entry->common.flags = NO_FLAGS;
583                 tt_global_entry->roam_at = 0;
584                 atomic_set(&tt_global_entry->common.refcount, 2);
585
586                 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
587                 spin_lock_init(&tt_global_entry->list_lock);
588
589                 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
590                                  choose_orig, &tt_global_entry->common,
591                                  &tt_global_entry->common.hash_entry);
592
593                 if (unlikely(hash_added != 0)) {
594                         /* remove the reference for the hash */
595                         tt_global_entry_free_ref(tt_global_entry);
596                         goto out_remove;
597                 }
598
599                 tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
600         } else {
601                 /* there is already a global entry, use this one. */
602
603                 /* If there is the TT_CLIENT_ROAM flag set, there is only one
604                  * originator left in the list and we previously received a
605                  * delete + roaming change for this originator.
606                  *
607                  * We should first delete the old originator before adding the
608                  * new one.
609                  */
610                 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
611                         tt_global_del_orig_list(tt_global_entry);
612                         tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
613                         tt_global_entry->roam_at = 0;
614                 }
615
616                 if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
617                         tt_global_add_orig_entry(tt_global_entry, orig_node,
618                                                  ttvn);
619         }
620
621         if (wifi)
622                 tt_global_entry->common.flags |= TT_CLIENT_WIFI;
623
624         bat_dbg(DBG_TT, bat_priv,
625                 "Creating new global tt entry: %pM (via %pM)\n",
626                 tt_global_entry->common.addr, orig_node->orig);
627
628 out_remove:
629         /* remove address from local hash if present */
630         tt_local_remove(bat_priv, tt_global_entry->common.addr,
631                         "global tt received", roaming);
632         ret = 1;
633 out:
634         if (tt_global_entry)
635                 tt_global_entry_free_ref(tt_global_entry);
636         return ret;
637 }
638
639 /* print all orig nodes that announce the address for this global entry.
640  * it is assumed that the caller holds rcu_read_lock();
641  */
642 static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
643                                   struct seq_file *seq)
644 {
645         struct hlist_head *head;
646         struct hlist_node *node;
647         struct tt_orig_list_entry *orig_entry;
648         struct tt_common_entry *tt_common_entry;
649         uint16_t flags;
650         uint8_t last_ttvn;
651
652         tt_common_entry = &tt_global_entry->common;
653
654         head = &tt_global_entry->orig_list;
655
656         hlist_for_each_entry_rcu(orig_entry, node, head, list) {
657                 flags = tt_common_entry->flags;
658                 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
659                 seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
660                            tt_global_entry->common.addr, orig_entry->ttvn,
661                            orig_entry->orig_node->orig, last_ttvn,
662                            (flags & TT_CLIENT_ROAM ? 'R' : '.'),
663                            (flags & TT_CLIENT_WIFI ? 'W' : '.'));
664         }
665 }
666
667 int tt_global_seq_print_text(struct seq_file *seq, void *offset)
668 {
669         struct net_device *net_dev = (struct net_device *)seq->private;
670         struct bat_priv *bat_priv = netdev_priv(net_dev);
671         struct hashtable_t *hash = bat_priv->tt_global_hash;
672         struct tt_common_entry *tt_common_entry;
673         struct tt_global_entry *tt_global_entry;
674         struct hard_iface *primary_if;
675         struct hlist_node *node;
676         struct hlist_head *head;
677         uint32_t i;
678         int ret = 0;
679
680         primary_if = primary_if_get_selected(bat_priv);
681         if (!primary_if) {
682                 ret = seq_printf(seq,
683                                  "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
684                                  net_dev->name);
685                 goto out;
686         }
687
688         if (primary_if->if_status != IF_ACTIVE) {
689                 ret = seq_printf(seq,
690                                  "BATMAN mesh %s disabled - primary interface not active\n",
691                                  net_dev->name);
692                 goto out;
693         }
694
695         seq_printf(seq,
696                    "Globally announced TT entries received via the mesh %s\n",
697                    net_dev->name);
698         seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
699                    "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
700
701         for (i = 0; i < hash->size; i++) {
702                 head = &hash->table[i];
703
704                 rcu_read_lock();
705                 hlist_for_each_entry_rcu(tt_common_entry, node,
706                                          head, hash_entry) {
707                         tt_global_entry = container_of(tt_common_entry,
708                                                        struct tt_global_entry,
709                                                        common);
710                         tt_global_print_entry(tt_global_entry, seq);
711                 }
712                 rcu_read_unlock();
713         }
714 out:
715         if (primary_if)
716                 hardif_free_ref(primary_if);
717         return ret;
718 }
719
720 /* deletes the orig list of a tt_global_entry */
721 static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
722 {
723         struct hlist_head *head;
724         struct hlist_node *node, *safe;
725         struct tt_orig_list_entry *orig_entry;
726
727         spin_lock_bh(&tt_global_entry->list_lock);
728         head = &tt_global_entry->orig_list;
729         hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
730                 hlist_del_rcu(node);
731                 tt_orig_list_entry_free_ref(orig_entry);
732         }
733         spin_unlock_bh(&tt_global_entry->list_lock);
734
735 }
736
737 static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
738                                      struct tt_global_entry *tt_global_entry,
739                                      struct orig_node *orig_node,
740                                      const char *message)
741 {
742         struct hlist_head *head;
743         struct hlist_node *node, *safe;
744         struct tt_orig_list_entry *orig_entry;
745
746         spin_lock_bh(&tt_global_entry->list_lock);
747         head = &tt_global_entry->orig_list;
748         hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
749                 if (orig_entry->orig_node == orig_node) {
750                         bat_dbg(DBG_TT, bat_priv,
751                                 "Deleting %pM from global tt entry %pM: %s\n",
752                                 orig_node->orig, tt_global_entry->common.addr,
753                                 message);
754                         hlist_del_rcu(node);
755                         tt_orig_list_entry_free_ref(orig_entry);
756                 }
757         }
758         spin_unlock_bh(&tt_global_entry->list_lock);
759 }
760
761 static void tt_global_del_struct(struct bat_priv *bat_priv,
762                                  struct tt_global_entry *tt_global_entry,
763                                  const char *message)
764 {
765         bat_dbg(DBG_TT, bat_priv,
766                 "Deleting global tt entry %pM: %s\n",
767                 tt_global_entry->common.addr, message);
768
769         hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
770                     tt_global_entry->common.addr);
771         tt_global_entry_free_ref(tt_global_entry);
772
773 }
774
775 /* If the client is to be deleted, we check if it is the last originator
776  * entry within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag
777  * and the roaming timer, otherwise we simply remove the originator
778  * scheduled for deletion. */
779 static void tt_global_del_roaming(struct bat_priv *bat_priv,
780                                   struct tt_global_entry *tt_global_entry,
781                                   struct orig_node *orig_node,
782                                   const char *message)
783 {
784         bool last_entry = true;
785         struct hlist_head *head;
786         struct hlist_node *node;
787         struct tt_orig_list_entry *orig_entry;
788
789         /* no local entry exists, case 1:
790          * Check if this is the last one or if other entries exist.
791          */
792
793         rcu_read_lock();
794         head = &tt_global_entry->orig_list;
795         hlist_for_each_entry_rcu(orig_entry, node, head, list) {
796                 if (orig_entry->orig_node != orig_node) {
797                         last_entry = false;
798                         break;
799                 }
800         }
801         rcu_read_unlock();
802
803         if (last_entry) {
804                 /* it's the last one, mark it for roaming. */
805                 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
806                 tt_global_entry->roam_at = jiffies;
807         } else
808                 /* there is another entry, we can simply delete this
809                  * one and can still use the other one.
810                  */
811                 tt_global_del_orig_entry(bat_priv, tt_global_entry,
812                                          orig_node, message);
813 }
814
815
816
817 static void tt_global_del(struct bat_priv *bat_priv,
818                           struct orig_node *orig_node,
819                           const unsigned char *addr,
820                           const char *message, bool roaming)
821 {
822         struct tt_global_entry *tt_global_entry = NULL;
823         struct tt_local_entry *tt_local_entry = NULL;
824
825         tt_global_entry = tt_global_hash_find(bat_priv, addr);
826         if (!tt_global_entry)
827                 goto out;
828
829         if (!roaming) {
830                 tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
831                                          message);
832
833                 if (hlist_empty(&tt_global_entry->orig_list))
834                         tt_global_del_struct(bat_priv, tt_global_entry,
835                                              message);
836
837                 goto out;
838         }
839
840         /* if we are deleting a global entry due to a roam
841          * event, there are two possibilities:
842          * 1) the client roamed from node A to node B => if there
843          *    is only one originator left for this client, we mark
844          *    it with TT_CLIENT_ROAM, we start a timer and we
845          *    wait for node B to claim it. In case of timeout
846          *    the entry is purged.
847          *
848          *    If there are other originators left, we directly delete
849          *    the originator.
850          * 2) the client roamed to us => we can directly delete
851          *    the global entry, since it is useless now. */
852
853         tt_local_entry = tt_local_hash_find(bat_priv,
854                                             tt_global_entry->common.addr);
855         if (tt_local_entry) {
856                 /* local entry exists, case 2: client roamed to us. */
857                 tt_global_del_orig_list(tt_global_entry);
858                 tt_global_del_struct(bat_priv, tt_global_entry, message);
859         } else
860                 /* no local entry exists, case 1: check for roaming */
861                 tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
862                                       message);
863
864
865 out:
866         if (tt_global_entry)
867                 tt_global_entry_free_ref(tt_global_entry);
868         if (tt_local_entry)
869                 tt_local_entry_free_ref(tt_local_entry);
870 }
871
872 void tt_global_del_orig(struct bat_priv *bat_priv,
873                         struct orig_node *orig_node, const char *message)
874 {
875         struct tt_global_entry *tt_global_entry;
876         struct tt_common_entry *tt_common_entry;
877         uint32_t i;
878         struct hashtable_t *hash = bat_priv->tt_global_hash;
879         struct hlist_node *node, *safe;
880         struct hlist_head *head;
881         spinlock_t *list_lock; /* protects write access to the hash lists */
882
883         if (!hash)
884                 return;
885
886         for (i = 0; i < hash->size; i++) {
887                 head = &hash->table[i];
888                 list_lock = &hash->list_locks[i];
889
890                 spin_lock_bh(list_lock);
891                 hlist_for_each_entry_safe(tt_common_entry, node, safe,
892                                           head, hash_entry) {
893                         tt_global_entry = container_of(tt_common_entry,
894                                                        struct tt_global_entry,
895                                                        common);
896
897                         tt_global_del_orig_entry(bat_priv, tt_global_entry,
898                                                  orig_node, message);
899
900                         if (hlist_empty(&tt_global_entry->orig_list)) {
901                                 bat_dbg(DBG_TT, bat_priv,
902                                         "Deleting global tt entry %pM: %s\n",
903                                         tt_global_entry->common.addr,
904                                         message);
905                                 hlist_del_rcu(node);
906                                 tt_global_entry_free_ref(tt_global_entry);
907                         }
908                 }
909                 spin_unlock_bh(list_lock);
910         }
911         atomic_set(&orig_node->tt_size, 0);
912         orig_node->tt_initialised = false;
913 }
914
915 static void tt_global_roam_purge(struct bat_priv *bat_priv)
916 {
917         struct hashtable_t *hash = bat_priv->tt_global_hash;
918         struct tt_common_entry *tt_common_entry;
919         struct tt_global_entry *tt_global_entry;
920         struct hlist_node *node, *node_tmp;
921         struct hlist_head *head;
922         spinlock_t *list_lock; /* protects write access to the hash lists */
923         uint32_t i;
924
925         for (i = 0; i < hash->size; i++) {
926                 head = &hash->table[i];
927                 list_lock = &hash->list_locks[i];
928
929                 spin_lock_bh(list_lock);
930                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
931                                           head, hash_entry) {
932                         tt_global_entry = container_of(tt_common_entry,
933                                                        struct tt_global_entry,
934                                                        common);
935                         if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
936                                 continue;
937                         if (!has_timed_out(tt_global_entry->roam_at,
938                                            TT_CLIENT_ROAM_TIMEOUT))
939                                 continue;
940
941                         bat_dbg(DBG_TT, bat_priv,
942                                 "Deleting global tt entry (%pM): Roaming timeout\n",
943                                 tt_global_entry->common.addr);
944
945                         hlist_del_rcu(node);
946                         tt_global_entry_free_ref(tt_global_entry);
947                 }
948                 spin_unlock_bh(list_lock);
949         }
950
951 }
952
953 static void tt_global_table_free(struct bat_priv *bat_priv)
954 {
955         struct hashtable_t *hash;
956         spinlock_t *list_lock; /* protects write access to the hash lists */
957         struct tt_common_entry *tt_common_entry;
958         struct tt_global_entry *tt_global_entry;
959         struct hlist_node *node, *node_tmp;
960         struct hlist_head *head;
961         uint32_t i;
962
963         if (!bat_priv->tt_global_hash)
964                 return;
965
966         hash = bat_priv->tt_global_hash;
967
968         for (i = 0; i < hash->size; i++) {
969                 head = &hash->table[i];
970                 list_lock = &hash->list_locks[i];
971
972                 spin_lock_bh(list_lock);
973                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
974                                           head, hash_entry) {
975                         hlist_del_rcu(node);
976                         tt_global_entry = container_of(tt_common_entry,
977                                                        struct tt_global_entry,
978                                                        common);
979                         tt_global_entry_free_ref(tt_global_entry);
980                 }
981                 spin_unlock_bh(list_lock);
982         }
983
984         hash_destroy(hash);
985
986         bat_priv->tt_global_hash = NULL;
987 }
988
989 static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
990                             struct tt_global_entry *tt_global_entry)
991 {
992         bool ret = false;
993
994         if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
995             tt_global_entry->common.flags & TT_CLIENT_WIFI)
996                 ret = true;
997
998         return ret;
999 }
1000
1001 struct orig_node *transtable_search(struct bat_priv *bat_priv,
1002                                     const uint8_t *src, const uint8_t *addr)
1003 {
1004         struct tt_local_entry *tt_local_entry = NULL;
1005         struct tt_global_entry *tt_global_entry = NULL;
1006         struct orig_node *orig_node = NULL;
1007         struct neigh_node *router = NULL;
1008         struct hlist_head *head;
1009         struct hlist_node *node;
1010         struct tt_orig_list_entry *orig_entry;
1011         int best_tq;
1012
1013         if (src && atomic_read(&bat_priv->ap_isolation)) {
1014                 tt_local_entry = tt_local_hash_find(bat_priv, src);
1015                 if (!tt_local_entry)
1016                         goto out;
1017         }
1018
1019         tt_global_entry = tt_global_hash_find(bat_priv, addr);
1020         if (!tt_global_entry)
1021                 goto out;
1022
1023         /* check whether the clients should not communicate due to AP
1024          * isolation */
1025         if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
1026                 goto out;
1027
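             /* among all originators announcing this client, pick the one
              * reachable through the router with the best TQ average */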
1028         best_tq = 0;
1029
1030         rcu_read_lock();
1031         head = &tt_global_entry->orig_list;
1032         hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1033                 router = orig_node_get_router(orig_entry->orig_node);
1034                 if (!router)
1035                         continue;
1036
1037                 if (router->tq_avg > best_tq) {
1038                         orig_node = orig_entry->orig_node;
1039                         best_tq = router->tq_avg;
1040                 }
1041                 neigh_node_free_ref(router);
1042         }
1043         /* found anything? */
1044         if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1045                 orig_node = NULL;
1046         rcu_read_unlock();
1047 out:
1048         if (tt_global_entry)
1049                 tt_global_entry_free_ref(tt_global_entry);
1050         if (tt_local_entry)
1051                 tt_local_entry_free_ref(tt_local_entry);
1052
1053         return orig_node;
1054 }
1055
1056 /* Calculates the checksum of the global table entries announced by a given orig_node */
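     /* Each client address is reduced to a CRC16 and the per-client CRCs are
      * XORed together, so the result does not depend on the iteration order.
      */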
1057 static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1058                               struct orig_node *orig_node)
1059 {
1060         uint16_t total = 0, total_one;
1061         struct hashtable_t *hash = bat_priv->tt_global_hash;
1062         struct tt_common_entry *tt_common_entry;
1063         struct tt_global_entry *tt_global_entry;
1064         struct hlist_node *node;
1065         struct hlist_head *head;
1066         uint32_t i;
1067         int j;
1068
1069         for (i = 0; i < hash->size; i++) {
1070                 head = &hash->table[i];
1071
1072                 rcu_read_lock();
1073                 hlist_for_each_entry_rcu(tt_common_entry, node,
1074                                          head, hash_entry) {
1075                         tt_global_entry = container_of(tt_common_entry,
1076                                                        struct tt_global_entry,
1077                                                        common);
1078                         /* Roaming clients are in the global table for
1079                          * consistency only. They must not be
1080                          * taken into account while computing the
1081                          * global crc
1082                          */
1083                         if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
1084                                 continue;
1085
1086                         /* find out if this global entry is announced by this
1087                          * originator
1088                          */
1089                         if (!tt_global_entry_has_orig(tt_global_entry,
1090                                                       orig_node))
1091                                 continue;
1092
1093                         total_one = 0;
1094                         for (j = 0; j < ETH_ALEN; j++)
1095                                 total_one = crc16_byte(total_one,
1096                                         tt_global_entry->common.addr[j]);
1097                         total ^= total_one;
1098                 }
1099                 rcu_read_unlock();
1100         }
1101
1102         return total;
1103 }
1104
1105 /* Calculates the checksum of the local table */
1106 uint16_t tt_local_crc(struct bat_priv *bat_priv)
1107 {
1108         uint16_t total = 0, total_one;
1109         struct hashtable_t *hash = bat_priv->tt_local_hash;
1110         struct tt_common_entry *tt_common_entry;
1111         struct hlist_node *node;
1112         struct hlist_head *head;
1113         uint32_t i;
1114         int j;
1115
1116         for (i = 0; i < hash->size; i++) {
1117                 head = &hash->table[i];
1118
1119                 rcu_read_lock();
1120                 hlist_for_each_entry_rcu(tt_common_entry, node,
1121                                          head, hash_entry) {
1122                         /* not yet committed clients must not be taken into
1123                          * account while computing the CRC */
1124                         if (tt_common_entry->flags & TT_CLIENT_NEW)
1125                                 continue;
1126                         total_one = 0;
1127                         for (j = 0; j < ETH_ALEN; j++)
1128                                 total_one = crc16_byte(total_one,
1129                                                    tt_common_entry->addr[j]);
1130                         total ^= total_one;
1131                 }
1132                 rcu_read_unlock();
1133         }
1134
1135         return total;
1136 }
1137
1138 static void tt_req_list_free(struct bat_priv *bat_priv)
1139 {
1140         struct tt_req_node *node, *safe;
1141
1142         spin_lock_bh(&bat_priv->tt_req_list_lock);
1143
1144         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1145                 list_del(&node->list);
1146                 kfree(node);
1147         }
1148
1149         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1150 }
1151
1152 static void tt_save_orig_buffer(struct bat_priv *bat_priv,
1153                                 struct orig_node *orig_node,
1154                                 const unsigned char *tt_buff,
1155                                 uint8_t tt_num_changes)
1156 {
1157         uint16_t tt_buff_len = tt_len(tt_num_changes);
1158
1159         /* Replace the old buffer only if I received something in the
1160          * last OGM (the OGM could carry no changes) */
1161         spin_lock_bh(&orig_node->tt_buff_lock);
1162         if (tt_buff_len > 0) {
1163                 kfree(orig_node->tt_buff);
1164                 orig_node->tt_buff_len = 0;
1165                 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1166                 if (orig_node->tt_buff) {
1167                         memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1168                         orig_node->tt_buff_len = tt_buff_len;
1169                 }
1170         }
1171         spin_unlock_bh(&orig_node->tt_buff_lock);
1172 }
1173
1174 static void tt_req_purge(struct bat_priv *bat_priv)
1175 {
1176         struct tt_req_node *node, *safe;
1177
1178         spin_lock_bh(&bat_priv->tt_req_list_lock);
1179         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1180                 if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
1181                         list_del(&node->list);
1182                         kfree(node);
1183                 }
1184         }
1185         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1186 }
1187
1188 /* returns the pointer to the new tt_req_node struct if no request
1189  * has already been issued for this orig_node, NULL otherwise */
1190 static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
1191                                           struct orig_node *orig_node)
1192 {
1193         struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1194
1195         spin_lock_bh(&bat_priv->tt_req_list_lock);
1196         list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1197                 if (compare_eth(tt_req_node_tmp, orig_node) &&
1198                     !has_timed_out(tt_req_node_tmp->issued_at,
1199                                    TT_REQUEST_TIMEOUT))
1200                         goto unlock;
1201         }
1202
1203         tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1204         if (!tt_req_node)
1205                 goto unlock;
1206
1207         memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1208         tt_req_node->issued_at = jiffies;
1209
1210         list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1211 unlock:
1212         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1213         return tt_req_node;
1214 }
1215
1216 /* data_ptr is unused here, but has to be kept to match the callback prototype */
1217 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
1218 {
1219         const struct tt_common_entry *tt_common_entry = entry_ptr;
1220
1221         if (tt_common_entry->flags & TT_CLIENT_NEW)
1222                 return 0;
1223         return 1;
1224 }
1225
1226 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1227 {
1228         const struct tt_common_entry *tt_common_entry = entry_ptr;
1229         const struct tt_global_entry *tt_global_entry;
1230         const struct orig_node *orig_node = data_ptr;
1231
1232         if (tt_common_entry->flags & TT_CLIENT_ROAM)
1233                 return 0;
1234
1235         tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1236                                        common);
1237
1238         return tt_global_entry_has_orig(tt_global_entry, orig_node);
1239 }
1240
1241 static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1242                                               struct hashtable_t *hash,
1243                                               struct hard_iface *primary_if,
1244                                               int (*valid_cb)(const void *,
1245                                                               const void *),
1246                                               void *cb_data)
1247 {
1248         struct tt_common_entry *tt_common_entry;
1249         struct tt_query_packet *tt_response;
1250         struct tt_change *tt_change;
1251         struct hlist_node *node;
1252         struct hlist_head *head;
1253         struct sk_buff *skb = NULL;
1254         uint16_t tt_tot, tt_count;
1255         ssize_t tt_query_size = sizeof(struct tt_query_packet);
1256         uint32_t i;
1257
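             /* limit the reply to what fits into a single packet on the
              * primary interface and round down to a whole number of
              * tt_change entries */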
1258         if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1259                 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1260                 tt_len -= tt_len % sizeof(struct tt_change);
1261         }
1262         tt_tot = tt_len / sizeof(struct tt_change);
1263
1264         skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
1265         if (!skb)
1266                 goto out;
1267
1268         skb_reserve(skb, ETH_HLEN);
1269         tt_response = (struct tt_query_packet *)skb_put(skb,
1270                                                      tt_query_size + tt_len);
1271         tt_response->ttvn = ttvn;
1272
1273         tt_change = (struct tt_change *)(skb->data + tt_query_size);
1274         tt_count = 0;
1275
1276         rcu_read_lock();
1277         for (i = 0; i < hash->size; i++) {
1278                 head = &hash->table[i];
1279
1280                 hlist_for_each_entry_rcu(tt_common_entry, node,
1281                                          head, hash_entry) {
1282                         if (tt_count == tt_tot)
1283                                 break;
1284
1285                         if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
1286                                 continue;
1287
1288                         memcpy(tt_change->addr, tt_common_entry->addr,
1289                                ETH_ALEN);
1290                         tt_change->flags = NO_FLAGS;
1291
1292                         tt_count++;
1293                         tt_change++;
1294                 }
1295         }
1296         rcu_read_unlock();
1297
1298         /* store in the message the number of entries we have successfully
1299          * copied */
1300         tt_response->tt_data = htons(tt_count);
1301
1302 out:
1303         return skb;
1304 }
1305
1306 static int send_tt_request(struct bat_priv *bat_priv,
1307                            struct orig_node *dst_orig_node,
1308                            uint8_t ttvn, uint16_t tt_crc, bool full_table)
1309 {
1310         struct sk_buff *skb = NULL;
1311         struct tt_query_packet *tt_request;
1312         struct neigh_node *neigh_node = NULL;
1313         struct hard_iface *primary_if;
1314         struct tt_req_node *tt_req_node = NULL;
1315         int ret = 1;
1316
1317         primary_if = primary_if_get_selected(bat_priv);
1318         if (!primary_if)
1319                 goto out;
1320
1321         /* The new tt_req will be issued only if I'm not waiting for a
1322          * reply from the same orig_node yet */
1323         tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
1324         if (!tt_req_node)
1325                 goto out;
1326
1327         skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
1328         if (!skb)
1329                 goto out;
1330
1331         skb_reserve(skb, ETH_HLEN);
1332
1333         tt_request = (struct tt_query_packet *)skb_put(skb,
1334                                 sizeof(struct tt_query_packet));
1335
1336         tt_request->header.packet_type = BAT_TT_QUERY;
1337         tt_request->header.version = COMPAT_VERSION;
1338         memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1339         memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1340         tt_request->header.ttl = TTL;
1341         tt_request->ttvn = ttvn;
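             /* in a TT_REQUEST the tt_data field carries the CRC the sender
              * expects for the requested table */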
1342         tt_request->tt_data = htons(tt_crc);
1343         tt_request->flags = TT_REQUEST;
1344
1345         if (full_table)
1346                 tt_request->flags |= TT_FULL_TABLE;
1347
1348         neigh_node = orig_node_get_router(dst_orig_node);
1349         if (!neigh_node)
1350                 goto out;
1351
1352         bat_dbg(DBG_TT, bat_priv,
1353                 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1354                 dst_orig_node->orig, neigh_node->addr,
1355                 (full_table ? 'F' : '.'));
1356
1357         send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1358         ret = 0;
1359
1360 out:
1361         if (neigh_node)
1362                 neigh_node_free_ref(neigh_node);
1363         if (primary_if)
1364                 hardif_free_ref(primary_if);
1365         if (ret)
1366                 kfree_skb(skb);
1367         if (ret && tt_req_node) {
1368                 spin_lock_bh(&bat_priv->tt_req_list_lock);
1369                 list_del(&tt_req_node->list);
1370                 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1371                 kfree(tt_req_node);
1372         }
1373         return ret;
1374 }
1375
1376 static bool send_other_tt_response(struct bat_priv *bat_priv,
1377                                    struct tt_query_packet *tt_request)
1378 {
1379         struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
1380         struct neigh_node *neigh_node = NULL;
1381         struct hard_iface *primary_if = NULL;
1382         uint8_t orig_ttvn, req_ttvn, ttvn;
1383         bool ret = false;
1384         unsigned char *tt_buff;
1385         bool full_table;
1386         uint16_t tt_len, tt_tot;
1387         struct sk_buff *skb = NULL;
1388         struct tt_query_packet *tt_response;
1389
1390         bat_dbg(DBG_TT, bat_priv,
1391                 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1392                 tt_request->src, tt_request->ttvn, tt_request->dst,
1393                 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1394
1395         /* Let's get the orig node of the REAL destination */
1396         req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
1397         if (!req_dst_orig_node)
1398                 goto out;
1399
1400         res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
1401         if (!res_dst_orig_node)
1402                 goto out;
1403
1404         neigh_node = orig_node_get_router(res_dst_orig_node);
1405         if (!neigh_node)
1406                 goto out;
1407
1408         primary_if = primary_if_get_selected(bat_priv);
1409         if (!primary_if)
1410                 goto out;
1411
1412         orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1413         req_ttvn = tt_request->ttvn;
1414
1415         /* I don't have the requested data */
1416         if (orig_ttvn != req_ttvn ||
1417             tt_request->tt_data != req_dst_orig_node->tt_crc)
1418                 goto out;
1419
1420         /* If the full table has been explicitly requested or no diff is available */
1421         if (tt_request->flags & TT_FULL_TABLE ||
1422             !req_dst_orig_node->tt_buff)
1423                 full_table = true;
1424         else
1425                 full_table = false;
1426
1427         /* In this version, fragmentation is not implemented, so
1428          * I'll send only one packet with as many TT entries as I can */
1429         if (!full_table) {
1430                 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1431                 tt_len = req_dst_orig_node->tt_buff_len;
1432                 tt_tot = tt_len / sizeof(struct tt_change);
1433
1434                 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1435                                     tt_len + ETH_HLEN);
1436                 if (!skb)
1437                         goto unlock;
1438
1439                 skb_reserve(skb, ETH_HLEN);
1440                 tt_response = (struct tt_query_packet *)skb_put(skb,
1441                                 sizeof(struct tt_query_packet) + tt_len);
1442                 tt_response->ttvn = req_ttvn;
1443                 tt_response->tt_data = htons(tt_tot);
1444
1445                 tt_buff = skb->data + sizeof(struct tt_query_packet);
1446                 /* Copy the last orig_node's OGM buffer */
1447                 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1448                        req_dst_orig_node->tt_buff_len);
1449
1450                 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1451         } else {
1452                 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
1453                                                 sizeof(struct tt_change);
1454                 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1455
1456                 skb = tt_response_fill_table(tt_len, ttvn,
1457                                              bat_priv->tt_global_hash,
1458                                              primary_if, tt_global_valid_entry,
1459                                              req_dst_orig_node);
1460                 if (!skb)
1461                         goto out;
1462
1463                 tt_response = (struct tt_query_packet *)skb->data;
1464         }
1465
1466         tt_response->header.packet_type = BAT_TT_QUERY;
1467         tt_response->header.version = COMPAT_VERSION;
1468         tt_response->header.ttl = TTL;
1469         memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1470         memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1471         tt_response->flags = TT_RESPONSE;
1472
1473         if (full_table)
1474                 tt_response->flags |= TT_FULL_TABLE;
1475
1476         bat_dbg(DBG_TT, bat_priv,
1477                 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1478                 res_dst_orig_node->orig, neigh_node->addr,
1479                 req_dst_orig_node->orig, req_ttvn);
1480
1481         send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1482         ret = true;
1483         goto out;
1484
1485 unlock:
1486         spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1487
1488 out:
1489         if (res_dst_orig_node)
1490                 orig_node_free_ref(res_dst_orig_node);
1491         if (req_dst_orig_node)
1492                 orig_node_free_ref(req_dst_orig_node);
1493         if (neigh_node)
1494                 neigh_node_free_ref(neigh_node);
1495         if (primary_if)
1496                 hardif_free_ref(primary_if);
1497         if (!ret)
1498                 kfree_skb(skb);
1499         return ret;
1500 }
1501
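/* Answer a TT_REQUEST addressed to this node with the local translation
 * table, either as a diff taken from the cached tt_buff or as a full table.
 * Always returns true because the request was meant for us and must not be
 * re-routed.
 */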
1502 static bool send_my_tt_response(struct bat_priv *bat_priv,
1503                                 struct tt_query_packet *tt_request)
1504 {
1505         struct orig_node *orig_node = NULL;
1506         struct neigh_node *neigh_node = NULL;
1507         struct hard_iface *primary_if = NULL;
1508         uint8_t my_ttvn, req_ttvn, ttvn;
1509         bool ret = false;
1510         unsigned char *tt_buff;
1511         bool full_table;
1512         uint16_t tt_len, tt_tot;
1513         struct sk_buff *skb = NULL;
1514         struct tt_query_packet *tt_response;
1515
1516         bat_dbg(DBG_TT, bat_priv,
1517                 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1518                 tt_request->src, tt_request->ttvn,
1519                 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1520
1521
1522         my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1523         req_ttvn = tt_request->ttvn;
1524
1525         orig_node = orig_hash_find(bat_priv, tt_request->src);
1526         if (!orig_node)
1527                 goto out;
1528
1529         neigh_node = orig_node_get_router(orig_node);
1530         if (!neigh_node)
1531                 goto out;
1532
1533         primary_if = primary_if_get_selected(bat_priv);
1534         if (!primary_if)
1535                 goto out;
1536
1537         /* If the full table has been explicitly requested or the gap
1538          * is too big, send the whole local translation table */
1539         if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
1540             !bat_priv->tt_buff)
1541                 full_table = true;
1542         else
1543                 full_table = false;
1544
1545         /* In this version, fragmentation is not implemented, so
1546          * I'll send only one packet with as many TT entries as I can */
1547         if (!full_table) {
1548                 spin_lock_bh(&bat_priv->tt_buff_lock);
1549                 tt_len = bat_priv->tt_buff_len;
1550                 tt_tot = tt_len / sizeof(struct tt_change);
1551
1552                 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1553                                     tt_len + ETH_HLEN);
1554                 if (!skb)
1555                         goto unlock;
1556
1557                 skb_reserve(skb, ETH_HLEN);
1558                 tt_response = (struct tt_query_packet *)skb_put(skb,
1559                                 sizeof(struct tt_query_packet) + tt_len);
1560                 tt_response->ttvn = req_ttvn;
1561                 tt_response->tt_data = htons(tt_tot);
1562
1563                 tt_buff = skb->data + sizeof(struct tt_query_packet);
1564                 memcpy(tt_buff, bat_priv->tt_buff,
1565                        bat_priv->tt_buff_len);
1566                 spin_unlock_bh(&bat_priv->tt_buff_lock);
1567         } else {
1568                 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
1569                                                 sizeof(struct tt_change);
1570                 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1571
1572                 skb = tt_response_fill_table(tt_len, ttvn,
1573                                              bat_priv->tt_local_hash,
1574                                              primary_if, tt_local_valid_entry,
1575                                              NULL);
1576                 if (!skb)
1577                         goto out;
1578
1579                 tt_response = (struct tt_query_packet *)skb->data;
1580         }
1581
1582         tt_response->header.packet_type = BAT_TT_QUERY;
1583         tt_response->header.version = COMPAT_VERSION;
1584         tt_response->header.ttl = TTL;
1585         memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1586         memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1587         tt_response->flags = TT_RESPONSE;
1588
1589         if (full_table)
1590                 tt_response->flags |= TT_FULL_TABLE;
1591
1592         bat_dbg(DBG_TT, bat_priv,
1593                 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1594                 orig_node->orig, neigh_node->addr,
1595                 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1596
1597         send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1598         ret = true;
1599         goto out;
1600
1601 unlock:
1602         spin_unlock_bh(&bat_priv->tt_buff_lock);
1603 out:
1604         if (orig_node)
1605                 orig_node_free_ref(orig_node);
1606         if (neigh_node)
1607                 neigh_node_free_ref(neigh_node);
1608         if (primary_if)
1609                 hardif_free_ref(primary_if);
1610         if (!ret)
1611                 kfree_skb(skb);
1612         /* This packet was for me, so it doesn't need to be re-routed */
1613         return true;
1614 }
1615
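/* Entry point for incoming TT_REQUEST packets: answer with the local table if
 * the request is addressed to this node (unless the requester is a backbone
 * gateway), otherwise try to answer on behalf of the requested originator.
 */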
1616 bool send_tt_response(struct bat_priv *bat_priv,
1617                       struct tt_query_packet *tt_request)
1618 {
1619         if (is_my_mac(tt_request->dst)) {
1620                 /* don't answer backbone gws! */
1621                 if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1622                         return true;
1623
1624                 return send_my_tt_response(bat_priv, tt_request);
1625         } else {
1626                 return send_other_tt_response(bat_priv, tt_request);
1627         }
1628 }
1629
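/* Apply a list of tt_change entries to the global table of orig_node:
 * TT_CLIENT_DEL entries are removed, all others are added. If storing a new
 * global entry fails, the update is aborted without marking the table as
 * initialised, so that consistent data can be requested again later.
 */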
1630 static void _tt_update_changes(struct bat_priv *bat_priv,
1631                                struct orig_node *orig_node,
1632                                struct tt_change *tt_change,
1633                                uint16_t tt_num_changes, uint8_t ttvn)
1634 {
1635         int i;
1636
1637         for (i = 0; i < tt_num_changes; i++) {
1638                 if ((tt_change + i)->flags & TT_CLIENT_DEL)
1639                         tt_global_del(bat_priv, orig_node,
1640                                       (tt_change + i)->addr,
1641                                       "tt removed by changes",
1642                                       (tt_change + i)->flags & TT_CLIENT_ROAM);
1643                 else
1644                         if (!tt_global_add(bat_priv, orig_node,
1645                                            (tt_change + i)->addr, ttvn, false,
1646                                            (tt_change + i)->flags &
1647                                                         TT_CLIENT_WIFI))
1648                                 /* In case of a problem while storing a
1649                                  * global_entry, we stop the updating
1650                                  * procedure without committing the
1651                                  * ttvn change. This avoids sending
1652                                  * corrupted data in reply to a tt_request
1653                                  */
1654                                 return;
1655         }
1656         orig_node->tt_initialised = true;
1657 }
1658
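/* Process a full-table TT_RESPONSE: drop everything previously learned from
 * tt_response->src, apply the tt_change entries that immediately follow the
 * tt_query_packet header, free the cached diff buffer and update last_ttvn.
 */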
1659 static void tt_fill_gtable(struct bat_priv *bat_priv,
1660                            struct tt_query_packet *tt_response)
1661 {
1662         struct orig_node *orig_node = NULL;
1663
1664         orig_node = orig_hash_find(bat_priv, tt_response->src);
1665         if (!orig_node)
1666                 goto out;
1667
1668         /* Purge the old table first */
1669         tt_global_del_orig(bat_priv, orig_node, "Received full table");
1670
1671         _tt_update_changes(bat_priv, orig_node,
1672                            (struct tt_change *)(tt_response + 1),
1673                            tt_response->tt_data, tt_response->ttvn);
1674
1675         spin_lock_bh(&orig_node->tt_buff_lock);
1676         kfree(orig_node->tt_buff);
1677         orig_node->tt_buff_len = 0;
1678         orig_node->tt_buff = NULL;
1679         spin_unlock_bh(&orig_node->tt_buff_lock);
1680
1681         atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1682
1683 out:
1684         if (orig_node)
1685                 orig_node_free_ref(orig_node);
1686 }
1687
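/* Apply a TT diff received from orig_node, cache the raw change buffer so it
 * can be handed out again in later TT_RESPONSEs and update last_ttvn.
 */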
1688 static void tt_update_changes(struct bat_priv *bat_priv,
1689                               struct orig_node *orig_node,
1690                               uint16_t tt_num_changes, uint8_t ttvn,
1691                               struct tt_change *tt_change)
1692 {
1693         _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1694                            ttvn);
1695
1696         tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1697                             tt_num_changes);
1698         atomic_set(&orig_node->last_ttvn, ttvn);
1699 }
1700
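/* Returns true if addr is served by this node as a local client and has not
 * been marked with TT_CLIENT_PENDING (i.e. logically deleted).
 */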
1701 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1702 {
1703         struct tt_local_entry *tt_local_entry = NULL;
1704         bool ret = false;
1705
1706         tt_local_entry = tt_local_hash_find(bat_priv, addr);
1707         if (!tt_local_entry)
1708                 goto out;
1709         /* Check if the client has been logically deleted (but is kept for
1710          * consistency purposes) */
1711         if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
1712                 goto out;
1713         ret = true;
1714 out:
1715         if (tt_local_entry)
1716                 tt_local_entry_free_ref(tt_local_entry);
1717         return ret;
1718 }
1719
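/* Process an incoming TT_RESPONSE: apply it as a full table or as a diff (the
 * tt_change entries follow the tt_query_packet header), remove the matching
 * entry from the pending tt_request list and recompute the CRC for this
 * originator.
 */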
1720 void handle_tt_response(struct bat_priv *bat_priv,
1721                         struct tt_query_packet *tt_response)
1722 {
1723         struct tt_req_node *node, *safe;
1724         struct orig_node *orig_node = NULL;
1725
1726         bat_dbg(DBG_TT, bat_priv,
1727                 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1728                 tt_response->src, tt_response->ttvn, tt_response->tt_data,
1729                 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1730
1731         /* we should never have asked a backbone gw */
1732         if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1733                 goto out;
1734
1735         orig_node = orig_hash_find(bat_priv, tt_response->src);
1736         if (!orig_node)
1737                 goto out;
1738
1739         if (tt_response->flags & TT_FULL_TABLE)
1740                 tt_fill_gtable(bat_priv, tt_response);
1741         else
1742                 tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
1743                                   tt_response->ttvn,
1744                                   (struct tt_change *)(tt_response + 1));
1745
1746         /* Delete the tt_req_node from the pending tt_requests list */
1747         spin_lock_bh(&bat_priv->tt_req_list_lock);
1748         list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1749                 if (!compare_eth(node->addr, tt_response->src))
1750                         continue;
1751                 list_del(&node->list);
1752                 kfree(node);
1753         }
1754         spin_unlock_bh(&bat_priv->tt_req_list_lock);
1755
1756         /* Recalculate the CRC for this orig_node and store it */
1757         orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1758         /* Roaming phase is over: tables are in sync again. I can
1759          * unset the flag */
1760         orig_node->tt_poss_change = false;
1761 out:
1762         if (orig_node)
1763                 orig_node_free_ref(orig_node);
1764 }
1765
1766 int tt_init(struct bat_priv *bat_priv)
1767 {
1768         if (!tt_local_init(bat_priv))
1769                 return 0;
1770
1771         if (!tt_global_init(bat_priv))
1772                 return 0;
1773
1774         tt_start_timer(bat_priv);
1775
1776         return 1;
1777 }
1778
1779 static void tt_roam_list_free(struct bat_priv *bat_priv)
1780 {
1781         struct tt_roam_node *node, *safe;
1782
1783         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1784
1785         list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1786                 list_del(&node->list);
1787                 kfree(node);
1788         }
1789
1790         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1791 }
1792
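/* Remove all entries from the roaming list that are older than
 * ROAMING_MAX_TIME.
 */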
1793 static void tt_roam_purge(struct bat_priv *bat_priv)
1794 {
1795         struct tt_roam_node *node, *safe;
1796
1797         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1798         list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1799                 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1800                         continue;
1801
1802                 list_del(&node->list);
1803                 kfree(node);
1804         }
1805         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1806 }
1807
1808 /* This function checks whether the client has already reached the
1809  * maximum number of possible roaming phases. If so, the ROAMING_ADV
1810  * will not be sent.
1811  *
1812  * returns true if the ROAMING_ADV can be sent, false otherwise */
1813 static bool tt_check_roam_count(struct bat_priv *bat_priv,
1814                                 uint8_t *client)
1815 {
1816         struct tt_roam_node *tt_roam_node;
1817         bool ret = false;
1818
1819         spin_lock_bh(&bat_priv->tt_roam_list_lock);
1820         /* A new roaming advertisement is allowed only if this client has
1821          * not already roamed ROAMING_MAX_COUNT times within ROAMING_MAX_TIME */
1822         list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1823                 if (!compare_eth(tt_roam_node->addr, client))
1824                         continue;
1825
1826                 if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
1827                         continue;
1828
1829                 if (!atomic_dec_not_zero(&tt_roam_node->counter))
1830                         /* Sorry, you roamed too many times! */
1831                         goto unlock;
1832                 ret = true;
1833                 break;
1834         }
1835
1836         if (!ret) {
1837                 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
1838                 if (!tt_roam_node)
1839                         goto unlock;
1840
1841                 tt_roam_node->first_time = jiffies;
1842                 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
1843                 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1844
1845                 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
1846                 ret = true;
1847         }
1848
1849 unlock:
1850         spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1851         return ret;
1852 }
1853
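/* Inform orig_node that the given client has roamed to this node, unless the
 * client has already used up its roaming quota (see tt_check_roam_count()).
 */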
1854 static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1855                           struct orig_node *orig_node)
1856 {
1857         struct neigh_node *neigh_node = NULL;
1858         struct sk_buff *skb = NULL;
1859         struct roam_adv_packet *roam_adv_packet;
1860         int ret = 1;
1861         struct hard_iface *primary_if;
1862
1863         /* before going on we have to check whether the client has
1864          * already roamed to us too many times */
1865         if (!tt_check_roam_count(bat_priv, client))
1866                 goto out;
1867
1868         skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
1869         if (!skb)
1870                 goto out;
1871
1872         skb_reserve(skb, ETH_HLEN);
1873
1874         roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1875                                         sizeof(struct roam_adv_packet));
1876
1877         roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
1878         roam_adv_packet->header.version = COMPAT_VERSION;
1879         roam_adv_packet->header.ttl = TTL;
1880         primary_if = primary_if_get_selected(bat_priv);
1881         if (!primary_if)
1882                 goto out;
1883         memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1884         hardif_free_ref(primary_if);
1885         memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1886         memcpy(roam_adv_packet->client, client, ETH_ALEN);
1887
1888         neigh_node = orig_node_get_router(orig_node);
1889         if (!neigh_node)
1890                 goto out;
1891
1892         bat_dbg(DBG_TT, bat_priv,
1893                 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1894                 orig_node->orig, client, neigh_node->addr);
1895
1896         send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1897         ret = 0;
1898
1899 out:
1900         if (neigh_node)
1901                 neigh_node_free_ref(neigh_node);
1902         if (ret)
1903                 kfree_skb(skb);
1904         return;
1905 }
1906
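/* Periodic worker: purge stale local entries, expired roaming global entries,
 * pending TT requests and old roaming list entries, then re-arm the timer.
 */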
1907 static void tt_purge(struct work_struct *work)
1908 {
1909         struct delayed_work *delayed_work =
1910                 container_of(work, struct delayed_work, work);
1911         struct bat_priv *bat_priv =
1912                 container_of(delayed_work, struct bat_priv, tt_work);
1913
1914         tt_local_purge(bat_priv);
1915         tt_global_roam_purge(bat_priv);
1916         tt_req_purge(bat_priv);
1917         tt_roam_purge(bat_priv);
1918
1919         tt_start_timer(bat_priv);
1920 }
1921
1922 void tt_free(struct bat_priv *bat_priv)
1923 {
1924         cancel_delayed_work_sync(&bat_priv->tt_work);
1925
1926         tt_local_table_free(bat_priv);
1927         tt_global_table_free(bat_priv);
1928         tt_req_list_free(bat_priv);
1929         tt_changes_list_free(bat_priv);
1930         tt_roam_list_free(bat_priv);
1931
1932         kfree(bat_priv->tt_buff);
1933 }
1934
1935 /* This function enables or disables the specified flags for all the entries
1936  * in the given hash table and returns the number of modified entries */
1937 static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
1938                              bool enable)
1939 {
1940         uint32_t i;
1941         uint16_t changed_num = 0;
1942         struct hlist_head *head;
1943         struct hlist_node *node;
1944         struct tt_common_entry *tt_common_entry;
1945
1946         if (!hash)
1947                 goto out;
1948
1949         for (i = 0; i < hash->size; i++) {
1950                 head = &hash->table[i];
1951
1952                 rcu_read_lock();
1953                 hlist_for_each_entry_rcu(tt_common_entry, node,
1954                                          head, hash_entry) {
1955                         if (enable) {
1956                                 if ((tt_common_entry->flags & flags) == flags)
1957                                         continue;
1958                                 tt_common_entry->flags |= flags;
1959                         } else {
1960                                 if (!(tt_common_entry->flags & flags))
1961                                         continue;
1962                                 tt_common_entry->flags &= ~flags;
1963                         }
1964                         changed_num++;
1965                 }
1966                 rcu_read_unlock();
1967         }
1968 out:
1969         return changed_num;
1970 }
1971
1972 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
1973 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
1974 {
1975         struct hashtable_t *hash = bat_priv->tt_local_hash;
1976         struct tt_common_entry *tt_common_entry;
1977         struct tt_local_entry *tt_local_entry;
1978         struct hlist_node *node, *node_tmp;
1979         struct hlist_head *head;
1980         spinlock_t *list_lock; /* protects write access to the hash lists */
1981         uint32_t i;
1982
1983         if (!hash)
1984                 return;
1985
1986         for (i = 0; i < hash->size; i++) {
1987                 head = &hash->table[i];
1988                 list_lock = &hash->list_locks[i];
1989
1990                 spin_lock_bh(list_lock);
1991                 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
1992                                           head, hash_entry) {
1993                         if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
1994                                 continue;
1995
1996                         bat_dbg(DBG_TT, bat_priv,
1997                                 "Deleting local tt entry (%pM): pending\n",
1998                                 tt_common_entry->addr);
1999
2000                         atomic_dec(&bat_priv->num_local_tt);
2001                         hlist_del_rcu(node);
2002                         tt_local_entry = container_of(tt_common_entry,
2003                                                       struct tt_local_entry,
2004                                                       common);
2005                         tt_local_entry_free_ref(tt_local_entry);
2006                 }
2007                 spin_unlock_bh(list_lock);
2008         }
2009
2010 }
2011
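/* Commit the pending local table changes once per OGM interval: clear the
 * TT_CLIENT_NEW flag so that new clients are counted as regular local
 * entries, purge the entries marked TT_CLIENT_PENDING and increment the
 * local translation table version number (ttvn).
 */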
2012 void tt_commit_changes(struct bat_priv *bat_priv)
2013 {
2014         uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
2015                                             TT_CLIENT_NEW, false);
2016         /* all entries that just had their TT_CLIENT_NEW flag cleared now have
2017          * to be counted as regular local entries */
2018         atomic_add(changed_num, &bat_priv->num_local_tt);
2019         tt_local_purge_pending_clients(bat_priv);
2020
2021         /* Increment the TTVN only once per OGM interval */
2022         atomic_inc(&bat_priv->ttvn);
2023         bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2024                 (uint8_t)atomic_read(&bat_priv->ttvn));
2025         bat_priv->tt_poss_change = false;
2026 }
2027
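/* Returns true if AP isolation is enabled and a frame from src to dst has to
 * be dropped because both clients match the isolation criteria checked by
 * _is_ap_isolated().
 */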
2028 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
2029 {
2030         struct tt_local_entry *tt_local_entry = NULL;
2031         struct tt_global_entry *tt_global_entry = NULL;
2032         bool ret = true;
2033
2034         if (!atomic_read(&bat_priv->ap_isolation))
2035                 return false;
2036
2037         tt_local_entry = tt_local_hash_find(bat_priv, dst);
2038         if (!tt_local_entry)
2039                 goto out;
2040
2041         tt_global_entry = tt_global_hash_find(bat_priv, src);
2042         if (!tt_global_entry)
2043                 goto out;
2044
2045         if (_is_ap_isolated(tt_local_entry, tt_global_entry))
2046                 goto out;
2047
2048         ret = false;
2049
2050 out:
2051         if (tt_global_entry)
2052                 tt_global_entry_free_ref(tt_global_entry);
2053         if (tt_local_entry)
2054                 tt_local_entry_free_ref(tt_local_entry);
2055         return ret;
2056 }
2057
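/* Called on OGM reception: if the advertised ttvn is exactly one ahead of the
 * stored one (or the table was never initialised and this is ttvn 1), apply
 * the attached diff and verify the CRC; otherwise, or on CRC mismatch, fall
 * back to sending a TT_REQUEST for consistent data.
 */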
2058 void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
2059                     const unsigned char *tt_buff, uint8_t tt_num_changes,
2060                     uint8_t ttvn, uint16_t tt_crc)
2061 {
2062         uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2063         bool full_table = true;
2064
2065         /* don't care about updates coming from a backbone gateway */
2066         if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2067                 return;
2068
2069         /* The orig table is not initialised and the first diff is in this OGM,
2070          * or the ttvn increased by exactly one -> apply the attached changes */
2071         if ((!orig_node->tt_initialised && ttvn == 1) ||
2072             ttvn - orig_ttvn == 1) {
2073                 /* the OGM could not contain the changes due to their size or
2074                  * because they have already been sent TT_OGM_APPEND_MAX times.
2075                  * In this case send a tt request */
2076                 if (!tt_num_changes) {
2077                         full_table = false;
2078                         goto request_table;
2079                 }
2080
2081                 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
2082                                   (struct tt_change *)tt_buff);
2083
2084                 /* Even if we received the precomputed crc with the OGM, we
2085                  * prefer to recompute it to spot any possible inconsistency
2086                  * in the global table */
2087                 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
2088
2089                 /* The ttvn alone is not enough to guarantee consistency
2090                  * because a single value could represent different states
2091                  * (due to the wrap around). Thus a node has to check whether
2092                  * the resulting table (after applying the changes) is still
2093                  * consistent or not. E.g. a node could disconnect while its
2094                  * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2095                  * checking the CRC value is mandatory to detect the
2096                  * inconsistency */
2097                 if (orig_node->tt_crc != tt_crc)
2098                         goto request_table;
2099
2100                 /* Roaming phase is over: tables are in sync again. I can
2101                  * unset the flag */
2102                 orig_node->tt_poss_change = false;
2103         } else {
2104                 /* if we missed more than one change or our tables are not
2105                  * in sync anymore -> request fresh tt data */
2106
2107                 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2108                     orig_node->tt_crc != tt_crc) {
2109 request_table:
2110                         bat_dbg(DBG_TT, bat_priv,
2111                                 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2112                                 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2113                                 orig_node->tt_crc, tt_num_changes);
2114                         send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
2115                                         full_table);
2116                         return;
2117                 }
2118         }
2119 }