/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
const char tipc_bclink_name[] = "broadcast-link";
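/* Note on sequence number arithmetic: broadcast sequence numbers live in a
 * wrapping 16-bit space, so all comparisons in this file go through the
 * mod()/less()/less_eq()/more() helpers shared with the unicast link code,
 * never through plain '<' / '>'.
 */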
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}
static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node = NULL;

	if (likely(!tn->bclink->flags)) {
		spin_unlock_bh(&tn->bclink->lock);
		return;
	}

	if (tn->bclink->flags & TIPC_BCLINK_RESET) {
		tn->bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to(net);
	}
	spin_unlock_bh(&tn->bclink->lock);

	if (node)
		tipc_link_reset_all(node);
}
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}
uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}
void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->bclink->flags |= flags;
}
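/* The three helpers below reuse the skb control block to store the number of
 * ACKs still expected for a queued broadcast packet: the count is seeded with
 * the cluster size at first transmission (see tipc_bcbearer_send()) and
 * decremented in tipc_bclink_acknowledge() until the buffer can be released.
 */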
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}
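/* The "last sent" sequence number is derived rather than stored: it is the
 * number just before the first packet still waiting in the backlog queue,
 * or next_out_no - 1 when the backlog is empty (everything has been sent).
 */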
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff *skb = skb_peek(&bcl->backlogq);

	if (skb)
		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}
u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->fsm_msg_cnt;
}
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
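/* Example: after = 10, to = 15 requests retransmission of packets 11..15;
 * the walk below skips to the first packet with seqno > 10 and hands it to
 * tipc_link_retransmit() with a count of mod(15 - 10) = 5.
 */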
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->fsm_msg_cnt;
		else
			acked = tn->bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;
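	/* Note: the oos_state parity acts as a NACK throttle -- a NACK is
	 * built only when oos_state is even, and sending one bumps it to an
	 * odd value, suppressing the next opportunity. Seeing another node's
	 * NACK for the same gap has the same effect (see bclink_peek_nack()).
	 */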
	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}
/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);

	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

	tipc_node_unlock(n_ptr);
}
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}
	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;
	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
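	/* Illustration: each node ACKs every TIPC_MIN_LINK_WIN-th packet,
	 * offset by its own node address, so two nodes whose addresses differ
	 * by k trigger on sequence numbers shifted by k. The per-window ACKs
	 * are thereby staggered instead of arriving as a burst.
	 */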
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		goto exit;
	}
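	/* What follows is effectively a small receive loop: an in-sequence
	 * packet is delivered, and as long as the head of the deferred queue
	 * has become the next expected packet it is dequeued and delivered as
	 * well, by jumping back to the receive: label below.
	 */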
	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}
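	/* The loop below is a greedy coverage pass: "remains" holds the nodes
	 * not yet reached; each bearer pair that covers at least one remaining
	 * node gets a copy of the packet, and "remains" shrinks (computed via
	 * tipc_nmap_diff()) until it is empty or the pairs run out.
	 */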
	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};
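	/* Each map entry above becomes one u32 netlink attribute in the loop
	 * below; the AVG_QUEUE entry divides only when queue_sz_counts is
	 * non-zero, so an idle link reports 0 instead of dividing by zero.
	 */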
	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		/* Release bclink lock on this early error path too */
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}
void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}
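/* A node map is a plain bitmap plus a population count: node n occupies bit
 * (n % WSIZE) of word (n / WSIZE) in map[], and "count" is kept in sync so
 * callers can test coverage without rescanning the words.
 */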
/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}
/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
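/* Per word, a ^ (a & b) is the same as a & ~b: it clears from A exactly the
 * bits that are also set in B, leaving the set difference A-B.
 */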
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}