2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the NetFilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
10 * ip_vs_sync: sync connection info from master load balancer to backups
14 * Alexandre Cassen : Added master & backup support at a time.
15 * Alexandre Cassen : Added SyncID support for incoming sync
17 * Justin Ossevoort : Fix endian problem on sync message size.
20 #define KMSG_COMPONENT "IPVS"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/inetdevice.h>
26 #include <linux/net.h>
27 #include <linux/completion.h>
28 #include <linux/delay.h>
29 #include <linux/skbuff.h>
31 #include <linux/igmp.h> /* for ip_mc_join_group */
32 #include <linux/udp.h>
33 #include <linux/err.h>
34 #include <linux/kthread.h>
35 #include <linux/wait.h>
36 #include <linux/kernel.h>
38 #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
43 #include <net/ip_vs.h>
45 #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
46 #define IP_VS_SYNC_PORT 8848 /* multicast port */
48 #define SYNC_PROTO_VER 1 /* Protocol version in header */
51 * IPVS sync connection entry
52 * Version 0, i.e. original version.
/* On-the-wire record for one synced connection, version-0 protocol.
 * All multi-byte fields are big-endian (__be16/__be32).
 * NOTE(review): this listing omits some lines (the reserved byte and the
 * cport/vport/dport fields are not visible here) — confirm against the
 * full source before relying on the layout. */
54 struct ip_vs_sync_conn_v0 {
57 /* Protocol, addresses and port numbers */
58 __u8 protocol; /* Which protocol (TCP/UDP) */
62 __be32 caddr; /* client address */
63 __be32 vaddr; /* virtual address */
64 __be32 daddr; /* destination address */
66 /* Flags and state transition */
67 __be16 flags; /* status flags */
68 __be16 state; /* state info */
70 /* The sequence options start here */
/* Optional TCP sequence-number adjustment data appended to a sync entry
 * when IP_VS_CONN_F_SEQ_MASK is set in the connection flags. */
73 struct ip_vs_sync_conn_options {
74 struct ip_vs_seq in_seq; /* incoming seq. struct */
75 struct ip_vs_seq out_seq; /* outgoing seq. struct */
79 Sync Connection format (sync_conn)
82 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
83 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
84 | Type | Protocol | Ver. | Size |
85 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
87 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
89 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
91 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
93 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
95 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
97 | IP-Addresses (v4 or v6) |
99 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
101 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 | Param. Type | Param. Length | Param. data |
103 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
105 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 | | Param Type | Param. Length |
107 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
109 | Last Param data should be padded for 32 bit alignment |
110 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
114 * Type 0, IPv4 sync connection format
/* Version-1 protocol, IPv4 sync connection record (Type field == 0).
 * ver_size packs the entry version (top 4 bits, SVER_SHIFT) and the
 * entry size (low 12 bits, SVER_MASK). All fields big-endian. */
116 struct ip_vs_sync_v4 {
118 __u8 protocol; /* Which protocol (TCP/UDP) */
119 __be16 ver_size; /* Version msb 4 bits */
120 /* Flags and state transition */
121 __be32 flags; /* status flags */
122 __be16 state; /* state info */
123 /* Protocol, addresses and port numbers */
127 __be32 fwmark; /* Firewall mark from skb */
128 __be32 timeout; /* cp timeout */
129 __be32 caddr; /* client address */
130 __be32 vaddr; /* virtual address */
131 __be32 daddr; /* destination address */
132 /* The sequence options start here */
133 /* PE data padded to 32bit alignment after seq. options */
136 * Type 2 messages IPv6
/* Version-1 protocol, IPv6 sync connection record (STYPE_F_INET6 set in
 * the Type field). Same layout as ip_vs_sync_v4 but with in6_addr
 * addresses. */
138 struct ip_vs_sync_v6 {
140 __u8 protocol; /* Which protocol (TCP/UDP) */
141 __be16 ver_size; /* Version msb 4 bits */
142 /* Flags and state transition */
143 __be32 flags; /* status flags */
144 __be16 state; /* state info */
145 /* Protocol, addresses and port numbers */
149 __be32 fwmark; /* Firewall mark from skb */
150 __be32 timeout; /* cp timeout */
151 struct in6_addr caddr; /* client address */
152 struct in6_addr vaddr; /* virtual address */
153 struct in6_addr daddr; /* destination address */
154 /* The sequence options start here */
155 /* PE data padded to 32bit alignment after seq. options */
/* Overlay of the v4/v6 record layouts; the common leading fields
 * (type/protocol/ver_size/flags/state) let receivers probe s->v4 before
 * deciding the address family. */
158 union ip_vs_sync_conn {
159 struct ip_vs_sync_v4 v4;
160 struct ip_vs_sync_v6 v6;
163 /* Bits in Type field in above */
164 #define STYPE_INET6 0
165 #define STYPE_F_INET6 (1 << STYPE_INET6)
167 #define SVER_SHIFT 12 /* Shift to get version */
168 #define SVER_MASK 0x0fff /* Mask to strip version */
170 #define IPVS_OPT_SEQ_DATA 1
171 #define IPVS_OPT_PE_DATA 2
172 #define IPVS_OPT_PE_NAME 3
173 #define IPVS_OPT_PARAM 7
175 #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1))
176 #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1))
177 #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1))
178 #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
180 struct ip_vs_sync_thread_data {
185 /* Version 0 definition of packet sizes */
186 #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0))
187 #define FULL_CONN_SIZE \
188 (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
192 The master multicasts messages (Datagrams) to the backup load balancers
193 in the following format.
196 Note, first byte should be Zero, so ver 0 receivers will drop the packet.
199 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
201 | 0 | SyncID | Size |
202 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
203 | Count Conns | Version | Reserved, set to Zero |
204 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
206 | IPVS Sync Connection (1) |
207 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
211 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
213 | IPVS Sync Connection (n) |
214 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
218 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
219 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
220 | Count Conns | SyncID | Size |
221 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
222 | IPVS Sync Connection (1) |
225 #define SYNC_MESG_HEADER_LEN 4
226 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
228 /* Version 0 header */
/* Datagram header for version-0 sync messages; code below reads/writes
 * nr_conns, syncid and size members (size is 16-bit, sent big-endian).
 * NOTE(review): member lines are missing from this listing. */
229 struct ip_vs_sync_mesg {
234 /* ip_vs_sync_conn entries start here */
237 /* Version 1 header */
/* Datagram header for version-1 sync messages. The leading zero byte
 * makes version-0 receivers drop the packet (their nr_conns would be 0). */
238 struct ip_vs_sync_mesg_v2 {
239 __u8 reserved; /* must be zero */
243 __s8 version; /* SYNC_PROTO_VER */
245 /* ip_vs_sync_conn entries start here */
248 /* the maximum length of sync (sending/receiving) message */
/* Derived from the mcast interface MTU by set_sync_mesg_maxlen(). */
249 static int sync_send_mesg_maxlen;
250 static int sync_recv_mesg_maxlen;
/* A queued, partially- or fully-built sync datagram awaiting transmit. */
252 struct ip_vs_sync_buff {
253 struct list_head list;
254 unsigned long firstuse;
256 /* pointers for the message data */
257 struct ip_vs_sync_mesg *mesg;
263 /* the sync_buff list head and the lock */
264 static LIST_HEAD(ip_vs_sync_queue);
265 static DEFINE_SPINLOCK(ip_vs_sync_lock);
267 /* current sync_buff for accepting new conn entries */
268 static struct ip_vs_sync_buff *curr_sb = NULL;
269 static DEFINE_SPINLOCK(curr_sb_lock);
271 /* ipvs sync daemon state */
272 volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
273 volatile int ip_vs_master_syncid = 0;
274 volatile int ip_vs_backup_syncid = 0;
276 /* multicast interface name */
277 char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
278 char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
280 /* sync daemon tasks */
281 static struct task_struct *sync_master_thread;
282 static struct task_struct *sync_backup_thread;
/* Fixed multicast destination (224.0.0.81:8848) used by both the sender
 * (connect) and the receiver (bind/join). */
285 static struct sockaddr_in mcast_addr = {
286 .sin_family = AF_INET,
287 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT),
288 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
292 * Copy of struct ip_vs_seq
293 * From unaligned network order to aligned host order
/* Copy a struct ip_vs_seq from unaligned network byte order (*no) into an
 * aligned host-order copy (*ho), field by field via get_unaligned_be32(). */
295 static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
297 ho->init_seq = get_unaligned_be32(&no->init_seq);
298 ho->delta = get_unaligned_be32(&no->delta);
299 ho->previous_delta = get_unaligned_be32(&no->previous_delta);
/* Pop the oldest sync buffer from ip_vs_sync_queue under ip_vs_sync_lock;
 * the caller takes ownership and must release it. Returns NULL (in the
 * omitted branch) when the queue is empty. */
302 static inline struct ip_vs_sync_buff *sb_dequeue(void)
304 struct ip_vs_sync_buff *sb;
306 spin_lock_bh(&ip_vs_sync_lock);
307 if (list_empty(&ip_vs_sync_queue)) {
310 sb = list_entry(ip_vs_sync_queue.next,
311 struct ip_vs_sync_buff,
315 spin_unlock_bh(&ip_vs_sync_lock);
/* Allocate a sync buffer plus its message area (GFP_ATOMIC: called under
 * curr_sb_lock from packet context). head starts 4 bytes in — past the
 * version-0 message header (SYNC_MESG_HEADER_LEN); end marks the MTU-based
 * limit. Returns NULL on allocation failure (omitted branches). */
320 static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
322 struct ip_vs_sync_buff *sb;
324 if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
327 if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
331 sb->mesg->nr_conns = 0;
332 sb->mesg->syncid = ip_vs_master_syncid;
334 sb->head = (unsigned char *)sb->mesg + 4;
335 sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
336 sb->firstuse = jiffies;
340 static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
/* Append a filled sync buffer to the transmit queue — but only while the
 * master daemon is running; otherwise release it (stop_sync_thread() clears
 * IP_VS_STATE_MASTER under this same lock to close the race). */
346 static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
348 spin_lock(&ip_vs_sync_lock);
349 if (ip_vs_sync_state & IP_VS_STATE_MASTER)
350 list_add_tail(&sb->list, &ip_vs_sync_queue);
352 ip_vs_sync_buff_release(sb);
353 spin_unlock(&ip_vs_sync_lock);
357 * Get the current sync buffer if it has been created for more
358 * than the specified time or the specified time is zero.
/* Detach and return curr_sb (caller owns it) under curr_sb_lock.
 * NOTE(review): the code tests time_before(age, time), i.e. age < time,
 * which contradicts the "for more than the specified time" comment above —
 * verify intent against upstream before changing either. */
360 static inline struct ip_vs_sync_buff *
361 get_curr_sync_buff(unsigned long time)
363 struct ip_vs_sync_buff *sb;
365 spin_lock_bh(&curr_sb_lock);
366 if (curr_sb && (time == 0 ||
367 time_before(jiffies - curr_sb->firstuse, time))) {
372 spin_unlock_bh(&curr_sb_lock);
378 * Add an ip_vs_conn information into the current sync_buff.
379 * Called by ip_vs_in.
/* Serialize one connection into the current version-0 sync buffer,
 * creating a buffer on demand. Uses FULL_CONN_SIZE when sequence options
 * are present (IP_VS_CONN_F_SEQ_MASK), else SIMPLE_CONN_SIZE. When the
 * buffer can no longer hold a full entry it is queued for transmit.
 * Recurses once at the end to also sync the controlling (template)
 * connection, if any. Runs under curr_sb_lock (softirq context). */
381 void ip_vs_sync_conn(const struct ip_vs_conn *cp)
383 struct ip_vs_sync_mesg *m;
384 struct ip_vs_sync_conn_v0 *s;
387 spin_lock(&curr_sb_lock);
389 if (!(curr_sb=ip_vs_sync_buff_create())) {
390 spin_unlock(&curr_sb_lock);
391 pr_err("ip_vs_sync_buff_create failed.\n");
396 len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
399 s = (struct ip_vs_sync_conn_v0 *)curr_sb->head;
/* copy members in network byte order; HASHED is purely local state */
402 s->protocol = cp->protocol;
403 s->cport = cp->cport;
404 s->vport = cp->vport;
405 s->dport = cp->dport;
406 s->caddr = cp->caddr.ip;
407 s->vaddr = cp->vaddr.ip;
408 s->daddr = cp->daddr.ip;
409 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
410 s->state = htons(cp->state);
411 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
412 struct ip_vs_sync_conn_options *opt =
413 (struct ip_vs_sync_conn_options *)&s[1];
414 memcpy(opt, &cp->in_seq, sizeof(*opt));
419 curr_sb->head += len;
421 /* check if there is a space for next one */
422 if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
423 sb_queue_tail(curr_sb);
426 spin_unlock(&curr_sb_lock);
428 /* synchronize its controller if it has */
430 ip_vs_sync_conn(cp->control);
434 * fill_param used by version 1
/* Build an ip_vs_conn_param from a received v1 sync record, picking the
 * v4/v6 member by af. If a PE name was supplied, look the engine up by
 * (NUL-terminated copy of) name and attach a kmalloc'd copy of pe_data;
 * on PE-related failure the module reference is dropped. Returns 0 on
 * success, non-zero error otherwise (some branches omitted here). */
437 ip_vs_conn_fill_param_sync(int af, union ip_vs_sync_conn *sc,
438 struct ip_vs_conn_param *p,
439 __u8 *pe_data, unsigned int pe_data_len,
440 __u8 *pe_name, unsigned int pe_name_len)
442 #ifdef CONFIG_IP_VS_IPV6
444 ip_vs_conn_fill_param(af, sc->v6.protocol,
445 (const union nf_inet_addr *)&sc->v6.caddr,
447 (const union nf_inet_addr *)&sc->v6.vaddr,
451 ip_vs_conn_fill_param(af, sc->v4.protocol,
452 (const union nf_inet_addr *)&sc->v4.caddr,
454 (const union nf_inet_addr *)&sc->v4.vaddr,
459 char buff[IP_VS_PENAME_MAXLEN+1];
461 memcpy(buff, pe_name, pe_name_len);
463 p->pe = __ip_vs_pe_getbyname(buff);
465 IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", buff);
469 IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
473 p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
476 module_put(p->pe->module);
479 memcpy(p->pe_data, pe_data, pe_data_len);
480 p->pe_data_len = pe_data_len;
486 * Connection Add / Update.
487 * Common for version 0 and 1 reception of backup sync_conns.
/* Look up (or create) the backup-side connection matching *param, bind it
 * to a real server, keep the destination's active/inactive counters in
 * step with the synced state, then refresh the connection timeout:
 * explicit sync'd timeout (v1) > protocol state-table timeout > 3 min
 * fallback. pe_data ownership: freed here when the conn already exists or
 * creation fails, otherwise consumed by the new connection. */
491 static void ip_vs_proc_conn(struct ip_vs_conn_param *param, unsigned flags,
492 unsigned state, unsigned protocol, unsigned type,
493 const union nf_inet_addr *daddr, __be16 dport,
494 unsigned long timeout, __u32 fwmark,
495 struct ip_vs_sync_conn_options *opt,
496 struct ip_vs_protocol *pp)
498 struct ip_vs_dest *dest;
499 struct ip_vs_conn *cp;
/* templates are kept in their own table */
502 if (!(flags & IP_VS_CONN_F_TEMPLATE))
503 cp = ip_vs_conn_in_get(param);
505 cp = ip_vs_ct_in_get(param);
507 if (cp && param->pe_data) /* Free pe_data */
508 kfree(param->pe_data);
511 * Find the appropriate destination for the connection.
512 * If it is not found the connection will remain unbound
515 dest = ip_vs_find_dest(type, daddr, dport, param->vaddr,
516 param->vport, protocol, fwmark);
518 /* Set the appropriate activity flag */
519 if (protocol == IPPROTO_TCP) {
520 if (state != IP_VS_TCP_S_ESTABLISHED)
521 flags |= IP_VS_CONN_F_INACTIVE;
523 flags &= ~IP_VS_CONN_F_INACTIVE;
524 } else if (protocol == IPPROTO_SCTP) {
525 if (state != IP_VS_SCTP_S_ESTABLISHED)
526 flags |= IP_VS_CONN_F_INACTIVE;
528 flags &= ~IP_VS_CONN_F_INACTIVE;
530 cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
532 atomic_dec(&dest->refcnt);
535 kfree(param->pe_data);
536 IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
539 } else if (!cp->dest) {
540 dest = ip_vs_try_bind_dest(cp);
542 atomic_dec(&dest->refcnt);
543 } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
544 (cp->state != state)) {
545 /* update active/inactive flag for the connection */
547 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
548 (state != IP_VS_TCP_S_ESTABLISHED)) {
549 atomic_dec(&dest->activeconns);
550 atomic_inc(&dest->inactconns);
551 cp->flags |= IP_VS_CONN_F_INACTIVE;
552 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
553 (state == IP_VS_TCP_S_ESTABLISHED)) {
554 atomic_inc(&dest->activeconns);
555 atomic_dec(&dest->inactconns);
556 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
558 } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
559 (cp->state != state)) {
561 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
562 (state != IP_VS_SCTP_S_ESTABLISHED)) {
563 atomic_dec(&dest->activeconns);
564 atomic_inc(&dest->inactconns);
/* NOTE(review): suspected bug — this CLEARS the INACTIVE flag in the
 * "connection became inactive" path, whereas the parallel TCP branch
 * above SETS it (|=). Should most likely be
 * cp->flags |= IP_VS_CONN_F_INACTIVE; verify against upstream. */
565 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
570 memcpy(&cp->in_seq, opt, sizeof(*opt));
571 atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
573 cp->old_state = cp->state;
575 * For Ver 0 messages style
576 * - Not possible to recover the right timeout for templates
577 * - can not find the right fwmark
578 * virtual service. If needed, we can do it for
579 * non-fwmark persistent services.
580 * Ver 1 messages style.
/* clamp the synced timeout so schedule_timeout() cannot overflow */
584 if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
585 timeout = MAX_SCHEDULE_TIMEOUT / HZ;
586 cp->timeout = timeout*HZ;
587 } else if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
588 cp->timeout = pp->timeout_table[state];
590 cp->timeout = (3*60*HZ);
595 * Process received multicast message for Version 0
/* Walk nr_conns version-0 records in the datagram, bounds-checking each
 * entry (and its optional seq-options) against buflen before use, then
 * hand each to ip_vs_proc_conn(). Records with unsupported protocols or
 * out-of-range states are skipped with a debug message. Timeout is sent
 * as zero since v0 does not carry one. */
597 static void ip_vs_process_message_v0(const char *buffer, const size_t buflen)
599 struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
600 struct ip_vs_sync_conn_v0 *s;
601 struct ip_vs_sync_conn_options *opt;
602 struct ip_vs_protocol *pp;
603 struct ip_vs_conn_param param;
607 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
608 for (i=0; i<m->nr_conns; i++) {
609 unsigned flags, state;
611 if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
612 IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
615 s = (struct ip_vs_sync_conn_v0 *) p;
616 flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
617 flags &= ~IP_VS_CONN_F_HASHED;
618 if (flags & IP_VS_CONN_F_SEQ_MASK) {
619 opt = (struct ip_vs_sync_conn_options *)&s[1];
621 if (p > buffer+buflen) {
622 IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n");
627 p += SIMPLE_CONN_SIZE;
630 state = ntohs(s->state);
631 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
632 pp = ip_vs_proto_get(s->protocol);
634 IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
638 if (state >= pp->num_states) {
639 IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
644 /* protocol in templates is not used for state/timeout */
647 IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
653 ip_vs_conn_fill_param(AF_INET, s->protocol,
654 (const union nf_inet_addr *)&s->caddr,
656 (const union nf_inet_addr *)&s->vaddr,
659 /* Send timeout as Zero */
660 ip_vs_proc_conn(&param, flags, state, s->protocol, AF_INET,
661 (union nf_inet_addr *)&s->daddr, s->dport,
/* Parse an IPVS_OPT_SEQ_DATA parameter: validate length, reject a
 * duplicate occurrence via *opt_flags, convert both seq structs to host
 * order into *opt, and record IPVS_OPT_F_SEQ_DATA. Returns 0 on success,
 * non-zero on a malformed/duplicate option (returns in omitted lines). */
669 static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
671 struct ip_vs_sync_conn_options *opt)
673 struct ip_vs_sync_conn_options *topt;
675 topt = (struct ip_vs_sync_conn_options *)p;
677 if (plen != sizeof(struct ip_vs_sync_conn_options)) {
678 IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
681 if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
682 IP_VS_DBG(2, "BACKUP, conn options found twice\n");
685 ntoh_seq(&topt->in_seq, &opt->in_seq);
686 ntoh_seq(&topt->out_seq, &opt->out_seq);
687 *opt_flags |= IPVS_OPT_F_SEQ_DATA;
/* Record a variable-length string/blob parameter (PE name or PE data):
 * reject over-long or duplicate occurrences, else store pointer+length
 * into *data/*data_len and mark `flag` in *opt_flags. Returns 0 on
 * success (success path omitted from this listing). */
691 static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
692 __u8 **data, unsigned int maxlen,
693 __u32 *opt_flags, __u32 flag)
696 IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
699 if (*opt_flags & flag) {
700 IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
709 * Process a Version 1 sync. connection
/* Decode one v1 record between p and msg_end: pick v4/v6 layout from the
 * Type field, then walk the TLV option area (seq data, PE data, PE name),
 * validating each length against msg_end. Unknown options are tolerated
 * only when the IPVS_OPT_F_PARAM bit marks them non-mandatory. Finally
 * resolve protocol/state, fill the conn param and call ip_vs_proc_conn()
 * with the sync'd timeout/fwmark. Returns <0 on decode error. */
711 static inline int ip_vs_proc_sync_conn(__u8 *p, __u8 *msg_end)
713 struct ip_vs_sync_conn_options opt;
714 union ip_vs_sync_conn *s;
715 struct ip_vs_protocol *pp;
716 struct ip_vs_conn_param param;
718 unsigned int af, state, pe_data_len=0, pe_name_len=0;
719 __u8 *pe_data=NULL, *pe_name=NULL;
723 s = (union ip_vs_sync_conn *) p;
725 if (s->v6.type & STYPE_F_INET6) {
726 #ifdef CONFIG_IP_VS_IPV6
728 p += sizeof(struct ip_vs_sync_v6);
730 IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
734 } else if (!s->v4.type) {
736 p += sizeof(struct ip_vs_sync_v4);
743 /* Process optional params check Type & Len. */
744 while (p < msg_end) {
745 /* zero-length or overrunning option => bail out (omitted lines
746 read ptype/plen before this check) */
753 if (!plen || ((p + plen) > msg_end))
755 /* Handle seq option p = param data */
756 switch (ptype & ~IPVS_OPT_F_PARAM) {
757 case IPVS_OPT_SEQ_DATA:
758 if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
762 case IPVS_OPT_PE_DATA:
763 if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
764 IP_VS_PEDATA_MAXLEN, &opt_flags,
769 case IPVS_OPT_PE_NAME:
770 if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name,
771 IP_VS_PENAME_MAXLEN, &opt_flags,
777 /* Param data mandatory ? */
778 if (!(ptype & IPVS_OPT_F_PARAM)) {
779 IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
780 ptype & ~IPVS_OPT_F_PARAM);
785 p += plen; /* Next option */
788 /* Get flags and Mask off unsupported */
789 flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
790 flags |= IP_VS_CONN_F_SYNC;
791 state = ntohs(s->v4.state);
793 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
794 pp = ip_vs_proto_get(s->v4.protocol);
796 IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
801 if (state >= pp->num_states) {
802 IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
808 /* protocol in templates is not used for state/timeout */
811 IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
816 if (ip_vs_conn_fill_param_sync(af, s, &param,
817 pe_data, pe_data_len,
818 pe_name, pe_name_len)) {
822 /* If only IPv4, just silent skip IPv6 */
824 ip_vs_proc_conn(&param, flags, state, s->v4.protocol, af,
825 (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
826 ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
827 (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL),
829 #ifdef CONFIG_IP_VS_IPV6
831 ip_vs_proc_conn(&param, flags, state, s->v6.protocol, af,
832 (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
833 ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
834 (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL),
840 IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
845 * Process received multicast message and create the corresponding
846 * ip_vs_conn entries.
847 * Handles Version 0 & 1
/* Top-level dispatcher for a received sync datagram. Sanity-checks the
 * header length/size/SyncID, then: reserved==0 + version==SYNC_PROTO_VER
 * + spare==0 selects the v1 per-record loop (each record bounds-checked
 * and 32-bit-aligned via its ver_size field); anything else falls back to
 * the version-0 parser. Note: m2->size is rewritten in place to host
 * order, so the buffer must not be reprocessed. */
849 static void ip_vs_process_message(__u8 *buffer, const size_t buflen)
851 struct ip_vs_sync_mesg_v2 *m2 = (struct ip_vs_sync_mesg_v2 *)buffer;
853 unsigned int i, nr_conns;
855 if (buflen < sizeof(struct ip_vs_sync_mesg)) {
856 IP_VS_DBG(2, "BACKUP, message header too short\n");
859 /* Convert size back to host byte order */
860 m2->size = ntohs(m2->size);
862 if (buflen != m2->size) {
863 IP_VS_DBG(2, "BACKUP, bogus message size\n");
866 /* SyncID sanity check */
867 if (ip_vs_backup_syncid != 0 && m2->syncid != ip_vs_backup_syncid) {
868 IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
871 /* Handle version 1 message */
872 if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
873 && (m2->spare == 0)) {
875 msg_end = buffer + sizeof(struct ip_vs_sync_mesg_v2);
876 nr_conns = m2->nr_conns;
878 for (i=0; i<nr_conns; i++) {
879 union ip_vs_sync_conn *s;
884 if (p + sizeof(s->v4) > buffer+buflen) {
885 IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
888 s = (union ip_vs_sync_conn *)p;
889 size = ntohs(s->v4.ver_size) & SVER_MASK;
891 /* Basic sanity checks */
892 if (msg_end > buffer+buflen) {
893 IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
896 if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
897 IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
898 ntohs(s->v4.ver_size) >> SVER_SHIFT);
901 /* Process a single sync_conn */
902 if ((retc=ip_vs_proc_sync_conn(p, msg_end)) < 0) {
903 IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
907 /* Make sure we have 32 bit alignment */
908 msg_end = p + ((size + 3) & ~3);
911 /* Old type of message */
912 ip_vs_process_message_v0(buffer, buflen);
919 * Setup loopback of outgoing multicasts on a sending socket
/* Kernel-side equivalent of the IP_MULTICAST_LOOP setsockopt: write the
 * mc_loop flag directly on the inet socket. */
921 static void set_mcast_loop(struct sock *sk, u_char loop)
923 struct inet_sock *inet = inet_sk(sk);
925 /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
927 inet->mc_loop = loop ? 1 : 0;
932 * Specify TTL for outgoing multicasts on a sending socket
/* Kernel-side equivalent of the IP_MULTICAST_TTL setsockopt (the mc_ttl
 * assignment is in a line omitted from this listing). */
934 static void set_mcast_ttl(struct sock *sk, u_char ttl)
936 struct inet_sock *inet = inet_sk(sk);
938 /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
945 * Specify default interface for outgoing multicasts
/* Select the default egress interface for multicasts by name: resolve the
 * device in init_net, refuse a device that conflicts with an existing
 * sk_bound_dev_if binding, then set inet->mc_index. Returns 0 on success,
 * -errno otherwise (error returns are in omitted lines). */
949 static int set_mcast_if(struct sock *sk, char *ifname)
949 struct net_device *dev;
950 struct inet_sock *inet = inet_sk(sk);
952 if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
955 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
959 inet->mc_index = dev->ifindex;
960 /* inet->mc_addr = 0; */
968 * Set the maximum length of sync message according to the
969 * specified interface's MTU.
/* Master: fit as many SIMPLE_CONN_SIZE entries as the mcast interface MTU
 * allows (minus IP+UDP headers, the 4-byte sync header, and a 20-byte
 * reserve for IP options), capped at MAX_CONNS_PER_SYNCBUFF. Backup: the
 * receive buffer is simply MTU minus IP+UDP headers. Returns 0 on
 * success, error when the interface does not exist (omitted lines). */
971 static int set_sync_mesg_maxlen(int sync_state)
973 struct net_device *dev;
976 if (sync_state == IP_VS_STATE_MASTER) {
977 if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
980 num = (dev->mtu - sizeof(struct iphdr) -
981 sizeof(struct udphdr) -
982 SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
983 sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
984 SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
985 IP_VS_DBG(7, "setting the maximum length of sync sending "
986 "message %d.\n", sync_send_mesg_maxlen);
987 } else if (sync_state == IP_VS_STATE_BACKUP) {
988 if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
991 sync_recv_mesg_maxlen = dev->mtu -
992 sizeof(struct iphdr) - sizeof(struct udphdr);
993 IP_VS_DBG(7, "setting the maximum length of sync receiving "
994 "message %d.\n", sync_recv_mesg_maxlen);
1002 * Join a multicast group.
1003 * the group is specified by a class D multicast address 224.0.0.0/8
1004 * in the in_addr structure passed in as a parameter.
/* Resolve ifname, reject a device conflicting with sk_bound_dev_if, fill
 * an ip_mreqn and join the group via ip_mc_join_group(). Locking around
 * the join call is in lines omitted from this listing. */
1007 join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
1009 struct ip_mreqn mreq;
1010 struct net_device *dev;
1013 memset(&mreq, 0, sizeof(mreq));
1014 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
1016 if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
1018 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
1021 mreq.imr_ifindex = dev->ifindex;
1024 ret = ip_mc_join_group(sk, &mreq);
/* Bind the sending socket to a universe-scope source address picked from
 * the given interface (inet_select_addr), so outgoing sync packets carry
 * a sensible source IP. Errors when the device has no usable address. */
1031 static int bind_mcastif_addr(struct socket *sock, char *ifname)
1033 struct net_device *dev;
1035 struct sockaddr_in sin;
1037 if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
1040 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
1042 pr_err("You probably need to specify IP address on "
1043 "multicast interface.\n");
1045 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
1048 /* Now bind the socket with the address of multicast interface */
1049 sin.sin_family = AF_INET;
1050 sin.sin_addr.s_addr = addr;
1053 return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
1057 * Set up sending multicast socket over UDP
/* Create the master's UDP socket: pin it to the configured mcast
 * interface, disable loopback, set TTL 1 (stay on the local segment),
 * bind to the interface address and connect to mcast_addr. On any error
 * returns ERR_PTR(result); the error path releases the socket in lines
 * omitted from this listing. */
1059 static struct socket * make_send_sock(void)
1061 struct socket *sock;
1064 /* First create a socket */
1065 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1067 pr_err("Error during creation of socket; terminating\n");
1068 return ERR_PTR(result);
1071 result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
1073 pr_err("Error setting outbound mcast interface\n");
1077 set_mcast_loop(sock->sk, 0);
1078 set_mcast_ttl(sock->sk, 1);
1080 result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
1082 pr_err("Error binding address of the mcast interface\n");
1086 result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
1087 sizeof(struct sockaddr), 0);
1089 pr_err("Error connecting to the multicast addr\n");
1097 return ERR_PTR(result);
1102 * Set up receiving multicast socket over UDP
/* Create the backup's UDP socket: mark it address-reusable, bind to the
 * sync multicast address/port and join the group on the configured
 * backup interface. Returns ERR_PTR(result) on failure (cleanup path in
 * omitted lines). */
1104 static struct socket * make_receive_sock(void)
1106 struct socket *sock;
1109 /* First create a socket */
1110 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1112 pr_err("Error during creation of socket; terminating\n");
1113 return ERR_PTR(result);
1116 /* it is equivalent to the REUSEADDR option in user-space */
1117 sock->sk->sk_reuse = 1;
1119 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
1120 sizeof(struct sockaddr));
1122 pr_err("Error binding to the multicast addr\n");
1126 /* join the multicast group */
1127 result = join_mcast_group(sock->sk,
1128 (struct in_addr *) &mcast_addr.sin_addr,
1129 ip_vs_backup_mcast_ifn);
1131 pr_err("Error joining to the multicast group\n");
1139 return ERR_PTR(result);
/* Non-blocking send of one sync datagram (MSG_DONTWAIT|MSG_NOSIGNAL);
 * returns the kernel_sendmsg() result (bytes sent or -errno). */
1144 ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
1146 struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
1151 iov.iov_base = (void *)buffer;
1152 iov.iov_len = length;
1154 len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
/* Finalize and transmit one sync message: flip msg->size to network byte
 * order in place (the host-order length is captured in an omitted line
 * before this), then send and log a short/failed send. */
1161 ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
1167 /* Put size in network byte order */
1168 msg->size = htons(msg->size);
1170 if (ip_vs_send_async(sock, (char *)msg, msize) != msize)
1171 pr_err("ip_vs_send_async error\n");
/* Receive one datagram into buffer (up to buflen bytes); returns the
 * kernel_recvmsg() result (bytes received or -errno). */
1175 ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
1177 struct msghdr msg = {NULL,};
1183 /* Receive a packet */
1184 iov.iov_base = buffer;
1185 iov.iov_len = (size_t)buflen;
1187 len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
/* Master kthread main loop: drain the transmit queue, flush the current
 * buffer if it has been accumulating for 2s, then sleep ~1s. On stop,
 * drains the queue and the in-progress buffer before releasing the
 * socket. tinfo (ip_vs_sync_thread_data) owns the socket. */
1197 static int sync_thread_master(void *data)
1199 struct ip_vs_sync_thread_data *tinfo = data;
1200 struct ip_vs_sync_buff *sb;
1202 pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
1204 ip_vs_master_mcast_ifn, ip_vs_master_syncid);
1206 while (!kthread_should_stop()) {
1207 while ((sb = sb_dequeue())) {
1208 ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
1209 ip_vs_sync_buff_release(sb);
1212 /* check if entries stay in curr_sb for 2 seconds */
1213 sb = get_curr_sync_buff(2 * HZ);
1215 ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
1216 ip_vs_sync_buff_release(sb);
1219 schedule_timeout_interruptible(HZ);
1222 /* clean up the sync_buff queue */
1223 while ((sb=sb_dequeue())) {
1224 ip_vs_sync_buff_release(sb);
1227 /* clean up the current sync_buff */
1228 if ((sb = get_curr_sync_buff(0))) {
1229 ip_vs_sync_buff_release(sb);
1232 /* release the sending multicast socket */
1233 sock_release(tinfo->sock);
/* Backup kthread main loop: sleep until the receive queue is non-empty
 * (or stop is requested), then drain it, feeding each datagram to
 * ip_vs_process_message() with bottom halves disabled (the BH disable/
 * enable calls are in lines omitted from this listing). */
1240 static int sync_thread_backup(void *data)
1242 struct ip_vs_sync_thread_data *tinfo = data;
1245 pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
1247 ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
1249 while (!kthread_should_stop()) {
1250 wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
1251 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
1252 || kthread_should_stop());
1254 /* do we have data now? */
1255 while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
1256 len = ip_vs_receive(tinfo->sock, tinfo->buf,
1257 sync_recv_mesg_maxlen);
1259 pr_err("receiving message error\n");
1263 /* disable bottom half, because it accesses the data
1264 shared by softirq while getting/creating conns */
1266 ip_vs_process_message(tinfo->buf, len);
1271 /* release the receiving multicast socket */
1272 sock_release(tinfo->sock);
/* Start the master or backup sync daemon: record the mcast interface and
 * syncid, create the appropriate socket, size the message buffers from
 * the MTU (plus a receive buffer for backup), then spawn the kthread and
 * mark the state active. Fails with -EEXIST if that daemon already runs
 * (return in omitted lines) and unwinds socket/buffer/tinfo on error
 * (cleanup labels omitted from this listing). Bumps the module refcount
 * on success so the module cannot unload while a daemon runs. */
1280 int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
1282 struct ip_vs_sync_thread_data *tinfo;
1283 struct task_struct **realtask, *task;
1284 struct socket *sock;
1285 char *name, *buf = NULL;
1286 int (*threadfn)(void *data);
1287 int result = -ENOMEM;
1289 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1290 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
1291 sizeof(struct ip_vs_sync_conn_v0));
1293 if (state == IP_VS_STATE_MASTER) {
1294 if (sync_master_thread)
1297 strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
1298 sizeof(ip_vs_master_mcast_ifn));
1299 ip_vs_master_syncid = syncid;
1300 realtask = &sync_master_thread;
1301 name = "ipvs_syncmaster";
1302 threadfn = sync_thread_master;
1303 sock = make_send_sock();
1304 } else if (state == IP_VS_STATE_BACKUP) {
1305 if (sync_backup_thread)
1308 strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
1309 sizeof(ip_vs_backup_mcast_ifn));
1310 ip_vs_backup_syncid = syncid;
1311 realtask = &sync_backup_thread;
1312 name = "ipvs_syncbackup";
1313 threadfn = sync_thread_backup;
1314 sock = make_receive_sock();
1320 result = PTR_ERR(sock);
1324 set_sync_mesg_maxlen(state);
1325 if (state == IP_VS_STATE_BACKUP) {
1326 buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
1331 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
1338 task = kthread_run(threadfn, tinfo, name);
1340 result = PTR_ERR(task);
1344 /* mark as active */
1346 ip_vs_sync_state |= state;
1348 /* increase the module use count */
1349 ip_vs_use_count_inc();
/* Stop the master or backup sync daemon. For the master, the state bit is
 * cleared under ip_vs_sync_lock so sb_queue_tail() cannot enqueue new
 * buffers while the thread is being stopped; kthread_stop() then waits
 * for the thread to drain and exit. Drops the module refcount on success.
 * Returns -ESRCH when the requested daemon is not running (returns in
 * omitted lines). */
1364 int stop_sync_thread(int state)
1366 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
1368 if (state == IP_VS_STATE_MASTER) {
1369 if (!sync_master_thread)
1372 pr_info("stopping master sync thread %d ...\n",
1373 task_pid_nr(sync_master_thread));
1376 * The lock synchronizes with sb_queue_tail(), so that we don't
1377 * add sync buffers to the queue, when we are already in
1378 * progress of stopping the master sync daemon.
1381 spin_lock_bh(&ip_vs_sync_lock);
1382 ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
1383 spin_unlock_bh(&ip_vs_sync_lock);
1384 kthread_stop(sync_master_thread);
1385 sync_master_thread = NULL;
1386 } else if (state == IP_VS_STATE_BACKUP) {
1387 if (!sync_backup_thread)
1390 pr_info("stopping backup sync thread %d ...\n",
1391 task_pid_nr(sync_backup_thread));
1393 ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
1394 kthread_stop(sync_backup_thread);
1395 sync_backup_thread = NULL;
1400 /* decrease the module use count */
1401 ip_vs_use_count_dec();