/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define BCLINK_WIN_MIN		32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */
struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

#define BCBEARER	MAX_BEARERS

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bcast_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @arrvq: queue of arriving multicast messages awaiting delivery
 * @inputq: queue of multicast messages ready for delivery to local sockets
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}

/**
 * tipc_nmap_equal - test for equality of node maps
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}

static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}

void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
}

uint tipc_bcast_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

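/* Buffers on the broadcast link's transmit queue reuse the skb control
 * block's "handle" field as a count of destination nodes that have not yet
 * acknowledged the buffer; the buffer is freed once that count drops to zero
 * (see tipc_bclink_acknowledge()).
 */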
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);

	/* Last node? => reset backlog queue */
	if (!tn->bcbase->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bcbase->link);

	tipc_bclink_unlock(net);
}

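/* The broadcast link reuses its "silent_intv_cnt" field to record the
 * sequence number of the most recently sent broadcast packet (snd_nxt - 1).
 */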
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

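/* Congestion handling: senders that could not add their message chain to the
 * broadcast link wait on the link's wakeup queue, tagged with the chain's
 * importance level and size; they are only woken up once the backlog for
 * that importance level has drained enough to accept the pending chain.
 */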
/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

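/* The per-node "oos_state" counter tracks how many link state updates have
 * been seen since a broadcast packet loss was detected; its low bit is used
 * to rate-limit NACK transmission, and bclink_peek_nack() advances it when
 * another node has already NACKed the same gap, deferring this node's own
 * NACK.
 */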
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

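/* Transmit path: the message chain is first reassembled into a single clone
 * for delivery to local sockets, since __tipc_link_xmit() consumes the
 * original chain when broadcasting it to the cluster.
 */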
/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 *                   and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bc_base *bclink = tn->bcbase;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb))
		return -EHOSTUNREACH;

	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

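/* Receive path overview: an in-sequence packet is accepted and delivered
 * according to its user type (data, message bundle or fragment), after which
 * any packets that have become in-sequence on the deferred queue are
 * delivered as well; packets ahead of sequence are deferred, and anything
 * else is counted as a duplicate and dropped.
 */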
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}

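/* The broadcast pseudo-bearer transmits via the real bearers: for each
 * priority level it picks the primary or secondary bearer according to the
 * message's link selector, and continues down the bearer pair list until the
 * map of remaining destinations is empty.
 */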
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}
		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}
	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);
	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);
	return -EMSGSIZE;
}

int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;
	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;
	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;
	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
	return tipc_bclink_set_queue_limits(net, win);
}

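/* tipc_bcast_init - create and initialize the broadcast link, its stand-in
 * peer node and the broadcast pseudo-bearer, and register the pseudo-bearer
 * in the bearer list at index MAX_BEARERS.
 */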
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bc_base *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&tipc_net(net)->bclock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);

	bcl->snd_nxt = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bcbase = bclink;
	tn->bcl = bcl;
	return 0;
}

void tipc_bcast_reinit(struct net *net)
{
	struct tipc_bc_base *b = tipc_bc_base(net);

	msg_set_prevnode(b->link.pmsg, tipc_own_addr(net));
}

void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bcbase);
}

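/* Node maps are bitmaps: node number n corresponds to bit (n % WSIZE) of
 * word (n / WSIZE) in the map, and "count" caches the number of bits set.
 */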
/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {