/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
#include "common.h"


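/* Per-ring backlog watermarks: netif transmission for an interface is
 * stopped once a ring queues more than BRCMF_FLOWRING_HIGH packets and is
 * resumed when the backlog drains below BRCMF_FLOWRING_LOW. A hash entry
 * whose ifidx is BRCMF_FLOWRING_INVALID_IFIDX is considered free.
 */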
#define BRCMF_FLOWRING_HIGH             1024
#define BRCMF_FLOWRING_LOW              (BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX    0xff

#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)

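/* Map 802.1D priority to firmware fifo index; following the usual WMM
 * mapping this orders the fifos as 0 = BK, 1 = BE, 2 = VI, 3 = VO.
 */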
static const u8 brcmf_flowring_prio2fifo[] = {
        1,
        0,
        0,
        1,
        2,
        2,
        3,
        3
};


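/* Return true if @mac belongs to a peer on the TDLS peer list. */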
static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *search;

        search = flow->tdls_entry;

        while (search) {
                if (memcmp(search->mac, mac, ETH_ALEN) == 0)
                        return true;
                search = search->next;
        }

        return false;
}


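/* Look up the flowring used for (@da, @prio, @ifidx). The hash index is only
 * a starting point; collisions are resolved by linear probing over the hash
 * table. Returns the flowid, or BRCMF_FLOWRING_INVALID_ID when no matching
 * ring exists.
 */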
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_hash *hash;
        u8 hash_idx;
        u32 i;
        bool found;
        bool sta;
        u8 fifo;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                          BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
                    (hash[hash_idx].fifo == fifo) &&
                    (hash[hash_idx].ifidx == ifidx)) {
                        found = true;
                        break;
                }
                hash_idx++;
        }
        if (found)
                return hash[hash_idx].flowid;

        return BRCMF_FLOWRING_INVALID_ID;
}


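/* Create a flowring for (@da, @prio, @ifidx): claim a free hash slot and the
 * first unused ring index, then allocate the ring in RING_CLOSED state.
 * Returns the new flowid on success, -ENOMEM when no ring slot or memory is
 * available, or BRCMF_FLOWRING_INVALID_ID when no free hash slot exists.
 */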
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_flowring_hash *hash;
        u8 hash_idx;
        u32 i;
        bool found;
        u8 fifo;
        bool sta;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                          BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
                    (is_zero_ether_addr(hash[hash_idx].mac))) {
                        found = true;
                        break;
                }
                hash_idx++;
        }
        if (found) {
                for (i = 0; i < flow->nrofrings; i++) {
                        if (flow->rings[i] == NULL)
                                break;
                }
                if (i == flow->nrofrings)
                        return -ENOMEM;

                ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
                if (!ring)
                        return -ENOMEM;

                memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
                hash[hash_idx].fifo = fifo;
                hash[hash_idx].ifidx = ifidx;
                hash[hash_idx].flowid = i;

                ring->hash_id = hash_idx;
                ring->status = RING_CLOSED;
                skb_queue_head_init(&ring->skblist);
                flow->rings[i] = ring;

                return i;
        }
        return BRCMF_FLOWRING_INVALID_ID;
}


u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        return flow->hash[ring->hash_id].fifo;
}


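/* Propagate per-ring flow control to the netif queues of the owning
 * interface: transmission is stopped when the first ring for that ifidx
 * becomes blocked and resumed only once no other open ring for it is still
 * blocked. Serialized by flow->block_lock.
 */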
static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
                                 bool blocked)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_bus *bus_if;
        struct brcmf_pub *drvr;
        struct brcmf_if *ifp;
        bool currently_blocked;
        int i;
        u8 ifidx;
        unsigned long flags;

        spin_lock_irqsave(&flow->block_lock, flags);

        ring = flow->rings[flowid];
        if (ring->blocked == blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }
        ifidx = brcmf_flowring_ifidx_get(flow, flowid);

        currently_blocked = false;
        for (i = 0; i < flow->nrofrings; i++) {
                if ((flow->rings[i]) && (i != flowid)) {
                        ring = flow->rings[i];
                        if ((ring->status == RING_OPEN) &&
                            (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
                                if (ring->blocked) {
                                        currently_blocked = true;
                                        break;
                                }
                        }
                }
        }
        flow->rings[flowid]->blocked = blocked;
        if (currently_blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }

        bus_if = dev_get_drvdata(flow->dev);
        drvr = bus_if->drvr;
        ifp = brcmf_get_ifp(drvr, ifidx);
        brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

        spin_unlock_irqrestore(&flow->block_lock, flags);
}


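/* Tear down a flowring: release its hash entry, lift any flow-control block,
 * drop all queued packets and free the ring itself.
 */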
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;
        u8 hash_idx;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (!ring)
                return;
        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
        eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;

        skb = skb_dequeue(&ring->skblist);
        while (skb) {
                brcmu_pkt_buf_free_skb(skb);
                skb = skb_dequeue(&ring->skblist);
        }

        kfree(ring);
}


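/* Queue @skb on the flowring and apply the high watermark for flow control.
 * Returns the resulting queue length.
 */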
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
                           struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_tail(&ring->skblist, skb);

        if (!ring->blocked &&
            (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
                brcmf_flowring_block(flow, flowid, true);
                brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
                /* Re-check the queue length to work around a possible race:
                 * the queue may already have drained below the low watermark
                 * between the check above and blocking the ring. Taking a
                 * lock on every enqueue and dequeue would also close the
                 * race, but is undesirable here; this extra check handles it
                 * if it occurs.
                 */
                if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
                        brcmf_flowring_block(flow, flowid, false);
        }
        return skb_queue_len(&ring->skblist);
}


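/* Take the next packet off an open flowring, releasing flow control once the
 * backlog drops below the low watermark. Returns NULL if the ring is not
 * open or is empty.
 */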
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (ring->status != RING_OPEN)
                return NULL;

        skb = skb_dequeue(&ring->skblist);

        if (ring->blocked &&
            (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
                brcmf_flowring_block(flow, flowid, false);
                brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
        }

        return skb;
}


void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
                             struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_head(&ring->skblist, skb);
}


u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring)
                return 0;

        if (ring->status != RING_OPEN)
                return 0;

        return skb_queue_len(&ring->skblist);
}


void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring) {
                brcmf_err("Ring NULL for flowid %d\n", flowid);
                return;
        }

        ring->status = RING_OPEN;
}


u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
{
        struct brcmf_flowring_ring *ring;
        u8 hash_idx;

        ring = flow->rings[flowid];
        hash_idx = ring->hash_id;

        return flow->hash[hash_idx].ifidx;
}


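/* Allocate the flowring bookkeeping for @nrofrings rings. All interfaces
 * start out in ADDR_INDIRECT mode and every hash slot is marked free.
 * Returns NULL on allocation failure.
 */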
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
        struct brcmf_flowring *flow;
        u32 i;

        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (flow) {
                flow->dev = dev;
                flow->nrofrings = nrofrings;
                spin_lock_init(&flow->block_lock);
                for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
                        flow->addr_mode[i] = ADDR_INDIRECT;
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
                        flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
                flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
                                      GFP_KERNEL);
                if (!flow->rings) {
                        kfree(flow);
                        flow = NULL;
                }
        }

        return flow;
}


void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_tdls_entry *search;
        struct brcmf_flowring_tdls_entry *remove;
        u8 flowid;

        for (flowid = 0; flowid < flow->nrofrings; flowid++) {
                if (flow->rings[flowid])
                        brcmf_msgbuf_delete_flowring(drvr, flowid);
        }

        search = flow->tdls_entry;
        while (search) {
                remove = search;
                search = search->next;
                kfree(remove);
        }
        kfree(flow->rings);
        kfree(flow);
}


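/* Switch the address mode of @ifidx. A mode change invalidates the existing
 * rings for that interface, so any open ring is moved to RING_CLOSING and
 * handed to msgbuf for deletion.
 */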
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
                                        enum proto_addr_mode addr_mode)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        u32 i;
        u8 flowid;

        if (flow->addr_mode[ifidx] != addr_mode) {
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
                        if (flow->hash[i].ifidx == ifidx) {
                                flowid = flow->hash[i].flowid;
                                if (flow->rings[flowid]->status != RING_OPEN)
                                        continue;
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
                flow->addr_mode[ifidx] = addr_mode;
        }
}


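/* A peer went away: close the rings associated with @peer on @ifidx (in
 * non-TDLS STA mode this is every ring of the interface) and remove the peer
 * from the TDLS list if it was on it.
 */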
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
                                u8 peer[ETH_ALEN])
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_hash *hash;
        struct brcmf_flowring_tdls_entry *prev;
        struct brcmf_flowring_tdls_entry *search;
        u32 i;
        u8 flowid;
        bool sta;

        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

        search = flow->tdls_entry;
        prev = NULL;
        while (search) {
                if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                        sta = false;
                        break;
                }
                prev = search;
                search = search->next;
        }

        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
                    (hash[i].ifidx == ifidx)) {
                        flowid = flow->hash[i].flowid;
                        if (flow->rings[flowid]->status == RING_OPEN) {
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
        }

        if (search) {
                if (prev)
                        prev->next = search->next;
                else
                        flow->tdls_entry = search->next;
                kfree(search);
                if (flow->tdls_entry == NULL)
                        flow->tdls_active = false;
        }
}


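/* Register @peer as a TDLS peer so that subsequent lookups hash it by MAC
 * address (like an AP-mode destination) instead of using the STA-mode ring.
 */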
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
                                  u8 peer[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *tdls_entry;
        struct brcmf_flowring_tdls_entry *search;

        tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
        if (tdls_entry == NULL)
                return;

        memcpy(tdls_entry->mac, peer, ETH_ALEN);
        tdls_entry->next = NULL;
        if (flow->tdls_entry == NULL) {
                flow->tdls_entry = tdls_entry;
        } else {
                search = flow->tdls_entry;
                if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                        goto free_entry;
                while (search->next) {
                        search = search->next;
                        if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                                goto free_entry;
                }
                search->next = tdls_entry;
        }

        flow->tdls_active = true;
        return;

free_entry:
        /* Peer is already on the list: drop the duplicate entry instead of
         * leaking it.
         */
        kfree(tdls_entry);
}