2 * NETLINK Kernel-user communication protocol.
4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 * Patrick McHardy <kaber@trash.net>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
14 * added netlink_proto_exit
15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
16 * use nlk_sk, as sk->protinfo is on a diet 8)
17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
18 * - inc module use count of module that owns
19 * the kernel socket in case userspace opens
20 * socket of same protocol
21 * - remove all module support, since netlink is
22 * mandatory if CONFIG_NET=y these days
25 #include <linux/module.h>
27 #include <linux/capability.h>
28 #include <linux/kernel.h>
29 #include <linux/init.h>
30 #include <linux/signal.h>
31 #include <linux/sched.h>
32 #include <linux/errno.h>
33 #include <linux/string.h>
34 #include <linux/stat.h>
35 #include <linux/socket.h>
37 #include <linux/fcntl.h>
38 #include <linux/termios.h>
39 #include <linux/sockios.h>
40 #include <linux/net.h>
42 #include <linux/slab.h>
43 #include <asm/uaccess.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/notifier.h>
50 #include <linux/security.h>
51 #include <linux/jhash.h>
52 #include <linux/jiffies.h>
53 #include <linux/random.h>
54 #include <linux/bitops.h>
56 #include <linux/types.h>
57 #include <linux/audit.h>
58 #include <linux/mutex.h>
59 #include <linux/vmalloc.h>
60 #include <linux/if_arp.h>
61 #include <asm/cacheflush.h>
63 #include <net/net_namespace.h>
66 #include <net/netlink.h>
68 #include "af_netlink.h"
70 struct listeners {
71 struct rcu_head rcu;
72 unsigned long masks[0];
73 };
76 #define NETLINK_CONGESTED 0x0
79 #define NETLINK_KERNEL_SOCKET 0x1
80 #define NETLINK_RECV_PKTINFO 0x2
81 #define NETLINK_BROADCAST_SEND_ERROR 0x4
82 #define NETLINK_RECV_NO_ENOBUFS 0x8
84 static inline int netlink_is_kernel(struct sock *sk)
86 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
89 struct netlink_table *nl_table;
90 EXPORT_SYMBOL_GPL(nl_table);
92 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
94 static int netlink_dump(struct sock *sk);
95 static void netlink_skb_destructor(struct sk_buff *skb);
97 DEFINE_RWLOCK(nl_table_lock);
98 EXPORT_SYMBOL_GPL(nl_table_lock);
99 static atomic_t nl_table_users = ATOMIC_INIT(0);
101 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
103 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
105 static DEFINE_SPINLOCK(netlink_tap_lock);
106 static struct list_head netlink_tap_all __read_mostly;
108 static inline u32 netlink_group_mask(u32 group)
110 return group ? 1 << (group - 1) : 0;
113 static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
115 return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
118 int netlink_add_tap(struct netlink_tap *nt)
120 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
123 spin_lock(&netlink_tap_lock);
124 list_add_rcu(&nt->list, &netlink_tap_all);
125 spin_unlock(&netlink_tap_lock);
128 __module_get(nt->module);
132 EXPORT_SYMBOL_GPL(netlink_add_tap);
134 int __netlink_remove_tap(struct netlink_tap *nt)
137 struct netlink_tap *tmp;
139 spin_lock(&netlink_tap_lock);
141 list_for_each_entry(tmp, &netlink_tap_all, list) {
143 list_del_rcu(&nt->list);
149 pr_warn("__netlink_remove_tap: %p not found\n", nt);
151 spin_unlock(&netlink_tap_lock);
153 if (found && nt->module)
154 module_put(nt->module);
156 return found ? 0 : -ENODEV;
158 EXPORT_SYMBOL_GPL(__netlink_remove_tap);
160 int netlink_remove_tap(struct netlink_tap *nt)
164 ret = __netlink_remove_tap(nt);
169 EXPORT_SYMBOL_GPL(netlink_remove_tap);
171 static bool netlink_filter_tap(const struct sk_buff *skb)
173 struct sock *sk = skb->sk;
176 /* We take the more conservative approach and
177 * whitelist socket protocols that may pass.
179 switch (sk->sk_protocol) {
181 case NETLINK_USERSOCK:
182 case NETLINK_SOCK_DIAG:
185 case NETLINK_FIB_LOOKUP:
186 case NETLINK_NETFILTER:
187 case NETLINK_GENERIC:
195 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
196 struct net_device *dev)
198 struct sk_buff *nskb;
199 struct sock *sk = skb->sk;
203 nskb = skb_clone(skb, GFP_ATOMIC);
206 nskb->protocol = htons((u16) sk->sk_protocol);
207 nskb->pkt_type = netlink_is_kernel(sk) ?
208 PACKET_KERNEL : PACKET_USER;
210 ret = dev_queue_xmit(nskb);
211 if (unlikely(ret > 0))
212 ret = net_xmit_errno(ret);
219 static void __netlink_deliver_tap(struct sk_buff *skb)
222 struct netlink_tap *tmp;
224 if (!netlink_filter_tap(skb))
227 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
228 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
234 static void netlink_deliver_tap(struct sk_buff *skb)
238 if (unlikely(!list_empty(&netlink_tap_all)))
239 __netlink_deliver_tap(skb);
244 static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
247 if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
248 netlink_deliver_tap(skb);
251 static void netlink_overrun(struct sock *sk)
253 struct netlink_sock *nlk = nlk_sk(sk);
255 if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
256 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
257 sk->sk_err = ENOBUFS;
258 sk->sk_error_report(sk);
261 atomic_inc(&sk->sk_drops);
264 static void netlink_rcv_wake(struct sock *sk)
266 struct netlink_sock *nlk = nlk_sk(sk);
268 if (skb_queue_empty(&sk->sk_receive_queue))
269 clear_bit(NETLINK_CONGESTED, &nlk->state);
270 if (!test_bit(NETLINK_CONGESTED, &nlk->state))
271 wake_up_interruptible(&nlk->wait);
274 #ifdef CONFIG_NETLINK_MMAP
275 static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
277 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
280 static bool netlink_rx_is_mmaped(struct sock *sk)
282 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
285 static bool netlink_tx_is_mmaped(struct sock *sk)
287 return nlk_sk(sk)->tx_ring.pg_vec != NULL;
290 static __pure struct page *pgvec_to_page(const void *addr)
292 if (is_vmalloc_addr(addr))
293 return vmalloc_to_page(addr);
295 return virt_to_page(addr);
298 static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
302 for (i = 0; i < len; i++) {
303 if (pg_vec[i] != NULL) {
304 if (is_vmalloc_addr(pg_vec[i]))
307 free_pages((unsigned long)pg_vec[i], order);
313 static void *alloc_one_pg_vec_page(unsigned long order)
316 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
317 __GFP_NOWARN | __GFP_NORETRY;
319 buffer = (void *)__get_free_pages(gfp_flags, order);
323 buffer = vzalloc((1 << order) * PAGE_SIZE);
327 gfp_flags &= ~__GFP_NORETRY;
328 return (void *)__get_free_pages(gfp_flags, order);
331 static void **alloc_pg_vec(struct netlink_sock *nlk,
332 struct nl_mmap_req *req, unsigned int order)
334 unsigned int block_nr = req->nm_block_nr;
338 pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
342 for (i = 0; i < block_nr; i++) {
343 pg_vec[i] = alloc_one_pg_vec_page(order);
344 if (pg_vec[i] == NULL)
350 free_pg_vec(pg_vec, order, block_nr);
354 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
355 bool closing, bool tx_ring)
357 struct netlink_sock *nlk = nlk_sk(sk);
358 struct netlink_ring *ring;
359 struct sk_buff_head *queue;
360 void **pg_vec = NULL;
361 unsigned int order = 0;
364 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
365 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
368 if (atomic_read(&nlk->mapped))
370 if (atomic_read(&ring->pending))
374 if (req->nm_block_nr) {
375 if (ring->pg_vec != NULL)
378 if ((int)req->nm_block_size <= 0)
380 if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
382 if (req->nm_frame_size < NL_MMAP_HDRLEN)
384 if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
387 ring->frames_per_block = req->nm_block_size /
389 if (ring->frames_per_block == 0)
391 if (ring->frames_per_block * req->nm_block_nr !=
395 order = get_order(req->nm_block_size);
396 pg_vec = alloc_pg_vec(nlk, req, order);
400 if (req->nm_frame_nr)
405 mutex_lock(&nlk->pg_vec_lock);
406 if (closing || atomic_read(&nlk->mapped) == 0) {
408 spin_lock_bh(&queue->lock);
410 ring->frame_max = req->nm_frame_nr - 1;
412 ring->frame_size = req->nm_frame_size;
413 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
415 swap(ring->pg_vec_len, req->nm_block_nr);
416 swap(ring->pg_vec_order, order);
417 swap(ring->pg_vec, pg_vec);
419 __skb_queue_purge(queue);
420 spin_unlock_bh(&queue->lock);
422 WARN_ON(atomic_read(&nlk->mapped));
424 mutex_unlock(&nlk->pg_vec_lock);
427 free_pg_vec(pg_vec, order, req->nm_block_nr);
431 static void netlink_mm_open(struct vm_area_struct *vma)
433 struct file *file = vma->vm_file;
434 struct socket *sock = file->private_data;
435 struct sock *sk = sock->sk;
438 atomic_inc(&nlk_sk(sk)->mapped);
441 static void netlink_mm_close(struct vm_area_struct *vma)
443 struct file *file = vma->vm_file;
444 struct socket *sock = file->private_data;
445 struct sock *sk = sock->sk;
448 atomic_dec(&nlk_sk(sk)->mapped);
451 static const struct vm_operations_struct netlink_mmap_ops = {
452 .open = netlink_mm_open,
453 .close = netlink_mm_close,
456 static int netlink_mmap(struct file *file, struct socket *sock,
457 struct vm_area_struct *vma)
459 struct sock *sk = sock->sk;
460 struct netlink_sock *nlk = nlk_sk(sk);
461 struct netlink_ring *ring;
462 unsigned long start, size, expected;
469 mutex_lock(&nlk->pg_vec_lock);
472 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
473 if (ring->pg_vec == NULL)
475 expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
481 size = vma->vm_end - vma->vm_start;
482 if (size != expected)
485 start = vma->vm_start;
486 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
487 if (ring->pg_vec == NULL)
490 for (i = 0; i < ring->pg_vec_len; i++) {
492 void *kaddr = ring->pg_vec[i];
495 for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
496 page = pgvec_to_page(kaddr);
497 err = vm_insert_page(vma, start, page);
506 atomic_inc(&nlk->mapped);
507 vma->vm_ops = &netlink_mmap_ops;
510 mutex_unlock(&nlk->pg_vec_lock);
514 static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
516 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
517 struct page *p_start, *p_end;
519 /* First page is flushed through netlink_{get,set}_status */
520 p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
521 p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
522 while (p_start <= p_end) {
523 flush_dcache_page(p_start);
529 static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
532 flush_dcache_page(pgvec_to_page(hdr));
533 return hdr->nm_status;
536 static void netlink_set_status(struct nl_mmap_hdr *hdr,
537 enum nl_mmap_status status)
539 hdr->nm_status = status;
540 flush_dcache_page(pgvec_to_page(hdr));
544 static struct nl_mmap_hdr *
545 __netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
547 unsigned int pg_vec_pos, frame_off;
549 pg_vec_pos = pos / ring->frames_per_block;
550 frame_off = pos % ring->frames_per_block;
552 return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
555 static struct nl_mmap_hdr *
556 netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
557 enum nl_mmap_status status)
559 struct nl_mmap_hdr *hdr;
561 hdr = __netlink_lookup_frame(ring, pos);
562 if (netlink_get_status(hdr) != status)
568 static struct nl_mmap_hdr *
569 netlink_current_frame(const struct netlink_ring *ring,
570 enum nl_mmap_status status)
572 return netlink_lookup_frame(ring, ring->head, status);
575 static struct nl_mmap_hdr *
576 netlink_previous_frame(const struct netlink_ring *ring,
577 enum nl_mmap_status status)
581 prev = ring->head ? ring->head - 1 : ring->frame_max;
582 return netlink_lookup_frame(ring, prev, status);
585 static void netlink_increment_head(struct netlink_ring *ring)
587 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
590 static void netlink_forward_ring(struct netlink_ring *ring)
592 unsigned int head = ring->head, pos = head;
593 const struct nl_mmap_hdr *hdr;
596 hdr = __netlink_lookup_frame(ring, pos);
597 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
599 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
601 netlink_increment_head(ring);
602 } while (ring->head != head);
605 static bool netlink_dump_space(struct netlink_sock *nlk)
607 struct netlink_ring *ring = &nlk->rx_ring;
608 struct nl_mmap_hdr *hdr;
611 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
615 n = ring->head + ring->frame_max / 2;
616 if (n > ring->frame_max)
617 n -= ring->frame_max;
619 hdr = __netlink_lookup_frame(ring, n);
621 return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
624 static unsigned int netlink_poll(struct file *file, struct socket *sock,
627 struct sock *sk = sock->sk;
628 struct netlink_sock *nlk = nlk_sk(sk);
632 if (nlk->rx_ring.pg_vec != NULL) {
633 /* Memory mapped sockets don't call recvmsg(), so flow control
634 * for dumps is performed here. A dump is allowed to continue
635 * if at least half the ring is unused.
637 while (nlk->cb_running && netlink_dump_space(nlk)) {
638 err = netlink_dump(sk);
641 sk->sk_error_report(sk);
645 netlink_rcv_wake(sk);
648 mask = datagram_poll(file, sock, wait);
650 spin_lock_bh(&sk->sk_receive_queue.lock);
651 if (nlk->rx_ring.pg_vec) {
652 netlink_forward_ring(&nlk->rx_ring);
653 if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
654 mask |= POLLIN | POLLRDNORM;
656 spin_unlock_bh(&sk->sk_receive_queue.lock);
658 spin_lock_bh(&sk->sk_write_queue.lock);
659 if (nlk->tx_ring.pg_vec) {
660 if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
661 mask |= POLLOUT | POLLWRNORM;
663 spin_unlock_bh(&sk->sk_write_queue.lock);
668 static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
670 return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
673 static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
674 struct netlink_ring *ring,
675 struct nl_mmap_hdr *hdr)
680 size = ring->frame_size - NL_MMAP_HDRLEN;
681 data = (void *)hdr + NL_MMAP_HDRLEN;
685 skb_reset_tail_pointer(skb);
686 skb->end = skb->tail + size;
689 skb->destructor = netlink_skb_destructor;
690 NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
691 NETLINK_CB(skb).sk = sk;
694 static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
695 u32 dst_portid, u32 dst_group,
696 struct sock_iocb *siocb)
698 struct netlink_sock *nlk = nlk_sk(sk);
699 struct netlink_ring *ring;
700 struct nl_mmap_hdr *hdr;
704 int err = 0, len = 0;
706 /* Netlink messages are validated by the receiver before processing.
707 * In order to avoid userspace changing the contents of the message
708 * after validation, the socket and the ring may only be used by a
709 * single process, otherwise we fall back to copying.
711 if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
712 atomic_read(&nlk->mapped) > 1)
715 mutex_lock(&nlk->pg_vec_lock);
717 ring = &nlk->tx_ring;
718 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
721 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
723 if (!(msg->msg_flags & MSG_DONTWAIT) &&
724 atomic_read(&nlk->tx_ring.pending))
728 if (hdr->nm_len > maxlen) {
733 netlink_frame_flush_dcache(hdr);
735 if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
736 skb = alloc_skb_head(GFP_KERNEL);
742 netlink_ring_setup_skb(skb, sk, ring, hdr);
743 NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
744 __skb_put(skb, hdr->nm_len);
745 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
746 atomic_inc(&ring->pending);
748 skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
753 __skb_put(skb, hdr->nm_len);
754 memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
755 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
758 netlink_increment_head(ring);
760 NETLINK_CB(skb).portid = nlk->portid;
761 NETLINK_CB(skb).dst_group = dst_group;
762 NETLINK_CB(skb).creds = siocb->scm->creds;
764 err = security_netlink_send(sk, skb);
770 if (unlikely(dst_group)) {
771 atomic_inc(&skb->users);
772 netlink_broadcast(sk, skb, dst_portid, dst_group,
775 err = netlink_unicast(sk, skb, dst_portid,
776 msg->msg_flags & MSG_DONTWAIT);
781 } while (hdr != NULL ||
782 (!(msg->msg_flags & MSG_DONTWAIT) &&
783 atomic_read(&nlk->tx_ring.pending)));
788 mutex_unlock(&nlk->pg_vec_lock);
792 static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
794 struct nl_mmap_hdr *hdr;
796 hdr = netlink_mmap_hdr(skb);
797 hdr->nm_len = skb->len;
798 hdr->nm_group = NETLINK_CB(skb).dst_group;
799 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
800 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
801 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
802 netlink_frame_flush_dcache(hdr);
803 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
805 NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
809 static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
811 struct netlink_sock *nlk = nlk_sk(sk);
812 struct netlink_ring *ring = &nlk->rx_ring;
813 struct nl_mmap_hdr *hdr;
815 spin_lock_bh(&sk->sk_receive_queue.lock);
816 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
818 spin_unlock_bh(&sk->sk_receive_queue.lock);
823 netlink_increment_head(ring);
824 __skb_queue_tail(&sk->sk_receive_queue, skb);
825 spin_unlock_bh(&sk->sk_receive_queue.lock);
827 hdr->nm_len = skb->len;
828 hdr->nm_group = NETLINK_CB(skb).dst_group;
829 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
830 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
831 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
832 netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
835 #else /* CONFIG_NETLINK_MMAP */
836 #define netlink_skb_is_mmaped(skb) false
837 #define netlink_rx_is_mmaped(sk) false
838 #define netlink_tx_is_mmaped(sk) false
839 #define netlink_mmap sock_no_mmap
840 #define netlink_poll datagram_poll
841 #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
842 #endif /* CONFIG_NETLINK_MMAP */
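/*
 * Userspace sketch (illustration, not part of this file): setting up and
 * draining an RX ring against the code above. The sizes are example values
 * that satisfy the checks in netlink_set_ring(): page-aligned blocks,
 * NL_MMAP_MSG_ALIGNMENT-aligned frames, and frames_per_block * nm_block_nr
 * == nm_frame_nr. The linear frame indexing below only works because
 * nm_frame_size divides nm_block_size evenly.
 */
#if 0	/* illustration only */
	struct nl_mmap_req req = {
		.nm_block_size	= 4096,
		.nm_block_nr	= 64,
		.nm_frame_size	= 2048,
		.nm_frame_nr	= 64 * 4096 / 2048,
	};
	unsigned int frame = 0;
	void *rx_ring;

	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
	rx_ring = mmap(NULL, (size_t)req.nm_block_size * req.nm_block_nr,
		       PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (;;) {
		struct nl_mmap_hdr *hdr = rx_ring + frame * req.nm_frame_size;

		switch (hdr->nm_status) {
		case NL_MMAP_STATUS_VALID:
			/* hdr->nm_len message bytes follow NL_MMAP_HDRLEN */
			hdr->nm_status = NL_MMAP_STATUS_UNUSED;
			break;
		case NL_MMAP_STATUS_COPY:
			/* frame was too small: fetch this one via recvmsg() */
			hdr->nm_status = NL_MMAP_STATUS_UNUSED;
			break;
		default:
			/* nothing ready: poll() for POLLIN before retrying */
			continue;
		}
		frame = (frame + 1) % req.nm_frame_nr;
	}
#endif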
844 static void netlink_skb_destructor(struct sk_buff *skb)
846 #ifdef CONFIG_NETLINK_MMAP
847 struct nl_mmap_hdr *hdr;
848 struct netlink_ring *ring;
851 /* If a packet from the kernel to userspace was freed because of an
852 * error without being delivered to userspace, the kernel must reset
853 * the status. In the userspace-to-kernel direction, the status is
854 * always reset here after the packet was processed and freed.
856 if (netlink_skb_is_mmaped(skb)) {
857 hdr = netlink_mmap_hdr(skb);
858 sk = NETLINK_CB(skb).sk;
860 if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
861 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
862 ring = &nlk_sk(sk)->tx_ring;
864 if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
866 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
868 ring = &nlk_sk(sk)->rx_ring;
871 WARN_ON(atomic_read(&ring->pending) == 0);
872 atomic_dec(&ring->pending);
878 if (is_vmalloc_addr(skb->head)) {
880 !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
889 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
891 WARN_ON(skb->sk != NULL);
893 skb->destructor = netlink_skb_destructor;
894 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
895 sk_mem_charge(sk, skb->truesize);
898 static void netlink_sock_destruct(struct sock *sk)
900 struct netlink_sock *nlk = nlk_sk(sk);
902 if (nlk->cb_running) {
904 nlk->cb.done(&nlk->cb);
906 module_put(nlk->cb.module);
907 kfree_skb(nlk->cb.skb);
910 skb_queue_purge(&sk->sk_receive_queue);
911 #ifdef CONFIG_NETLINK_MMAP
913 struct nl_mmap_req req;
915 memset(&req, 0, sizeof(req));
916 if (nlk->rx_ring.pg_vec)
917 netlink_set_ring(sk, &req, true, false);
918 memset(&req, 0, sizeof(req));
919 if (nlk->tx_ring.pg_vec)
920 netlink_set_ring(sk, &req, true, true);
922 #endif /* CONFIG_NETLINK_MMAP */
924 if (!sock_flag(sk, SOCK_DEAD)) {
925 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
929 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
930 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
931 WARN_ON(nlk_sk(sk)->groups);
934 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
935 * SMP. Look: when several writers sleep and a reader wakes them up, all but
936 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
937 * solves this, _but_ remember, it adds useless work on UP machines.
940 void netlink_table_grab(void)
941 __acquires(nl_table_lock)
945 write_lock_irq(&nl_table_lock);
947 if (atomic_read(&nl_table_users)) {
948 DECLARE_WAITQUEUE(wait, current);
950 add_wait_queue_exclusive(&nl_table_wait, &wait);
952 set_current_state(TASK_UNINTERRUPTIBLE);
953 if (atomic_read(&nl_table_users) == 0)
955 write_unlock_irq(&nl_table_lock);
957 write_lock_irq(&nl_table_lock);
960 __set_current_state(TASK_RUNNING);
961 remove_wait_queue(&nl_table_wait, &wait);
965 void netlink_table_ungrab(void)
966 __releases(nl_table_lock)
968 write_unlock_irq(&nl_table_lock);
969 wake_up(&nl_table_wait);
973 netlink_lock_table(void)
975 /* read_lock() synchronizes us to netlink_table_grab */
977 read_lock(&nl_table_lock);
978 atomic_inc(&nl_table_users);
979 read_unlock(&nl_table_lock);
983 netlink_unlock_table(void)
985 if (atomic_dec_and_test(&nl_table_users))
986 wake_up(&nl_table_wait);
989 static bool netlink_compare(struct net *net, struct sock *sk)
991 return net_eq(sock_net(sk), net);
994 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
996 struct netlink_table *table = &nl_table[protocol];
997 struct nl_portid_hash *hash = &table->hash;
998 struct hlist_head *head;
1001 read_lock(&nl_table_lock);
1002 head = nl_portid_hashfn(hash, portid);
1003 sk_for_each(sk, head) {
1004 if (table->compare(net, sk) &&
1005 (nlk_sk(sk)->portid == portid)) {
1012 read_unlock(&nl_table_lock);
1016 static struct hlist_head *nl_portid_hash_zalloc(size_t size)
1018 if (size <= PAGE_SIZE)
1019 return kzalloc(size, GFP_ATOMIC);
1021 return (struct hlist_head *)
1022 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
1026 static void nl_portid_hash_free(struct hlist_head *table, size_t size)
1028 if (size <= PAGE_SIZE)
1031 free_pages((unsigned long)table, get_order(size));
1034 static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
1036 unsigned int omask, mask, shift;
1038 struct hlist_head *otable, *table;
1041 omask = mask = hash->mask;
1042 osize = size = (mask + 1) * sizeof(*table);
1043 shift = hash->shift;
1046 if (++shift > hash->max_shift)
1048 mask = mask * 2 + 1;
1052 table = nl_portid_hash_zalloc(size);
1056 otable = hash->table;
1057 hash->table = table;
1059 hash->shift = shift;
1060 get_random_bytes(&hash->rnd, sizeof(hash->rnd));
1062 for (i = 0; i <= omask; i++) {
1064 struct hlist_node *tmp;
1066 sk_for_each_safe(sk, tmp, &otable[i])
1067 __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
1070 nl_portid_hash_free(otable, osize);
1071 hash->rehash_time = jiffies + 10 * 60 * HZ;
1075 static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
1077 int avg = hash->entries >> hash->shift;
1079 if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
1082 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
1083 nl_portid_hash_rehash(hash, 0);
1090 static const struct proto_ops netlink_ops;
1093 netlink_update_listeners(struct sock *sk)
1095 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1098 struct listeners *listeners;
1100 listeners = nl_deref_protected(tbl->listeners);
1104 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
1106 sk_for_each_bound(sk, &tbl->mc_list) {
1107 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1108 mask |= nlk_sk(sk)->groups[i];
1110 listeners->masks[i] = mask;
1112 /* this function is only called with the netlink table "grabbed", which
1113 * makes sure updates are visible before bind or setsockopt return. */
1116 static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
1118 struct netlink_table *table = &nl_table[sk->sk_protocol];
1119 struct nl_portid_hash *hash = &table->hash;
1120 struct hlist_head *head;
1121 int err = -EADDRINUSE;
1125 netlink_table_grab();
1126 head = nl_portid_hashfn(hash, portid);
1128 sk_for_each(osk, head) {
1129 if (table->compare(net, osk) &&
1130 (nlk_sk(osk)->portid == portid))
1138 if (nlk_sk(sk)->portid)
1142 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
1145 if (len && nl_portid_hash_dilute(hash, len))
1146 head = nl_portid_hashfn(hash, portid);
1148 nlk_sk(sk)->portid = portid;
1149 sk_add_node(sk, head);
1153 netlink_table_ungrab();
1157 static void netlink_remove(struct sock *sk)
1159 netlink_table_grab();
1160 if (sk_del_node_init(sk))
1161 nl_table[sk->sk_protocol].hash.entries--;
1162 if (nlk_sk(sk)->subscriptions)
1163 __sk_del_bind_node(sk);
1164 netlink_table_ungrab();
1167 static struct proto netlink_proto = {
1169 .owner = THIS_MODULE,
1170 .obj_size = sizeof(struct netlink_sock),
1173 static int __netlink_create(struct net *net, struct socket *sock,
1174 struct mutex *cb_mutex, int protocol)
1177 struct netlink_sock *nlk;
1179 sock->ops = &netlink_ops;
1181 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
1185 sock_init_data(sock, sk);
1189 nlk->cb_mutex = cb_mutex;
1191 nlk->cb_mutex = &nlk->cb_def_mutex;
1192 mutex_init(nlk->cb_mutex);
1194 init_waitqueue_head(&nlk->wait);
1195 #ifdef CONFIG_NETLINK_MMAP
1196 mutex_init(&nlk->pg_vec_lock);
1199 sk->sk_destruct = netlink_sock_destruct;
1200 sk->sk_protocol = protocol;
1204 static int netlink_create(struct net *net, struct socket *sock, int protocol,
1207 struct module *module = NULL;
1208 struct mutex *cb_mutex;
1209 struct netlink_sock *nlk;
1210 void (*bind)(int group);
1213 sock->state = SS_UNCONNECTED;
1215 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1216 return -ESOCKTNOSUPPORT;
1218 if (protocol < 0 || protocol >= MAX_LINKS)
1219 return -EPROTONOSUPPORT;
1221 netlink_lock_table();
1222 #ifdef CONFIG_MODULES
1223 if (!nl_table[protocol].registered) {
1224 netlink_unlock_table();
1225 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
1226 netlink_lock_table();
1229 if (nl_table[protocol].registered &&
1230 try_module_get(nl_table[protocol].module))
1231 module = nl_table[protocol].module;
1233 err = -EPROTONOSUPPORT;
1234 cb_mutex = nl_table[protocol].cb_mutex;
1235 bind = nl_table[protocol].bind;
1236 netlink_unlock_table();
1241 err = __netlink_create(net, sock, cb_mutex, protocol);
1246 sock_prot_inuse_add(net, &netlink_proto, 1);
1249 nlk = nlk_sk(sock->sk);
1250 nlk->module = module;
1251 nlk->netlink_bind = bind;
1260 static int netlink_release(struct socket *sock)
1262 struct sock *sk = sock->sk;
1263 struct netlink_sock *nlk;
1273 * OK. Socket is unlinked, any packets that arrive now
1278 wake_up_interruptible_all(&nlk->wait);
1280 skb_queue_purge(&sk->sk_write_queue);
1283 struct netlink_notify n = {
1284 .net = sock_net(sk),
1285 .protocol = sk->sk_protocol,
1286 .portid = nlk->portid,
1288 atomic_notifier_call_chain(&netlink_chain,
1289 NETLINK_URELEASE, &n);
1292 module_put(nlk->module);
1294 netlink_table_grab();
1295 if (netlink_is_kernel(sk)) {
1296 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1297 if (--nl_table[sk->sk_protocol].registered == 0) {
1298 struct listeners *old;
1300 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1301 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1302 kfree_rcu(old, rcu);
1303 nl_table[sk->sk_protocol].module = NULL;
1304 nl_table[sk->sk_protocol].bind = NULL;
1305 nl_table[sk->sk_protocol].flags = 0;
1306 nl_table[sk->sk_protocol].registered = 0;
1308 } else if (nlk->subscriptions) {
1309 netlink_update_listeners(sk);
1311 netlink_table_ungrab();
1317 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
1323 static int netlink_autobind(struct socket *sock)
1325 struct sock *sk = sock->sk;
1326 struct net *net = sock_net(sk);
1327 struct netlink_table *table = &nl_table[sk->sk_protocol];
1328 struct nl_portid_hash *hash = &table->hash;
1329 struct hlist_head *head;
1331 s32 portid = task_tgid_vnr(current);
1333 static s32 rover = -4097;
1337 netlink_table_grab();
1338 head = nl_portid_hashfn(hash, portid);
1339 sk_for_each(osk, head) {
1340 if (!table->compare(net, osk))
1342 if (nlk_sk(osk)->portid == portid) {
1343 /* Bind collision, search negative portid values. */
1347 netlink_table_ungrab();
1351 netlink_table_ungrab();
1353 err = netlink_insert(sk, net, portid);
1354 if (err == -EADDRINUSE)
1357 /* If 2 threads race to autobind, that is fine. */
1364 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
1366 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
1367 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
1371 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1373 struct netlink_sock *nlk = nlk_sk(sk);
1375 if (nlk->subscriptions && !subscriptions)
1376 __sk_del_bind_node(sk);
1377 else if (!nlk->subscriptions && subscriptions)
1378 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1379 nlk->subscriptions = subscriptions;
1382 static int netlink_realloc_groups(struct sock *sk)
1384 struct netlink_sock *nlk = nlk_sk(sk);
1385 unsigned int groups;
1386 unsigned long *new_groups;
1389 netlink_table_grab();
1391 groups = nl_table[sk->sk_protocol].groups;
1392 if (!nl_table[sk->sk_protocol].registered) {
1397 if (nlk->ngroups >= groups)
1400 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1401 if (new_groups == NULL) {
1405 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
1406 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1408 nlk->groups = new_groups;
1409 nlk->ngroups = groups;
1411 netlink_table_ungrab();
1415 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1418 struct sock *sk = sock->sk;
1419 struct net *net = sock_net(sk);
1420 struct netlink_sock *nlk = nlk_sk(sk);
1421 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1424 if (addr_len < sizeof(struct sockaddr_nl))
1427 if (nladdr->nl_family != AF_NETLINK)
1430 /* Only superuser is allowed to listen to multicasts */
1431 if (nladdr->nl_groups) {
1432 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
1434 err = netlink_realloc_groups(sk);
1440 if (nladdr->nl_pid != nlk->portid)
1443 err = nladdr->nl_pid ?
1444 netlink_insert(sk, net, nladdr->nl_pid) :
1445 netlink_autobind(sock);
1450 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1453 netlink_table_grab();
1454 netlink_update_subscriptions(sk, nlk->subscriptions +
1455 hweight32(nladdr->nl_groups) -
1456 hweight32(nlk->groups[0]));
1457 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
1458 netlink_update_listeners(sk);
1459 netlink_table_ungrab();
1461 if (nlk->netlink_bind && nlk->groups[0]) {
1464 for (i = 0; i < nlk->ngroups; i++) {
1465 if (test_bit(i, nlk->groups))
1466 nlk->netlink_bind(i);
1473 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1474 int alen, int flags)
1477 struct sock *sk = sock->sk;
1478 struct netlink_sock *nlk = nlk_sk(sk);
1479 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1481 if (alen < sizeof(addr->sa_family))
1484 if (addr->sa_family == AF_UNSPEC) {
1485 sk->sk_state = NETLINK_UNCONNECTED;
1486 nlk->dst_portid = 0;
1490 if (addr->sa_family != AF_NETLINK)
1493 /* Only superuser is allowed to send multicasts */
1494 if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
1498 err = netlink_autobind(sock);
1501 sk->sk_state = NETLINK_CONNECTED;
1502 nlk->dst_portid = nladdr->nl_pid;
1503 nlk->dst_group = ffs(nladdr->nl_groups);
1509 static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1510 int *addr_len, int peer)
1512 struct sock *sk = sock->sk;
1513 struct netlink_sock *nlk = nlk_sk(sk);
1514 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1516 nladdr->nl_family = AF_NETLINK;
1518 *addr_len = sizeof(*nladdr);
1521 nladdr->nl_pid = nlk->dst_portid;
1522 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1524 nladdr->nl_pid = nlk->portid;
1525 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1530 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1533 struct netlink_sock *nlk;
1535 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1537 return ERR_PTR(-ECONNREFUSED);
1539 /* Don't bother queuing skb if kernel socket has no input function */
1541 if (sock->sk_state == NETLINK_CONNECTED &&
1542 nlk->dst_portid != nlk_sk(ssk)->portid) {
1544 return ERR_PTR(-ECONNREFUSED);
1549 struct sock *netlink_getsockbyfilp(struct file *filp)
1551 struct inode *inode = file_inode(filp);
1554 if (!S_ISSOCK(inode->i_mode))
1555 return ERR_PTR(-ENOTSOCK);
1557 sock = SOCKET_I(inode)->sk;
1558 if (sock->sk_family != AF_NETLINK)
1559 return ERR_PTR(-EINVAL);
1565 static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1568 struct sk_buff *skb;
1571 if (size <= NLMSG_GOODSIZE || broadcast)
1572 return alloc_skb(size, GFP_KERNEL);
1574 size = SKB_DATA_ALIGN(size) +
1575 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1577 data = vmalloc(size);
1581 skb = build_skb(data, size);
1586 skb->destructor = netlink_skb_destructor;
1593 * Attach a skb to a netlink socket.
1594 * The caller must hold a reference to the destination socket. On error, the
1595 * reference is dropped. The skb is not send to the destination, just all
1596 * all error checks are performed and memory in the queue is reserved.
1598 * < 0: error. skb freed, reference to sock dropped.
1600 * 1: repeat lookup - reference dropped while waiting for socket memory.
1602 int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
1603 long *timeo, struct sock *ssk)
1605 struct netlink_sock *nlk;
1609 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1610 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1611 !netlink_skb_is_mmaped(skb)) {
1612 DECLARE_WAITQUEUE(wait, current);
1614 if (!ssk || netlink_is_kernel(ssk))
1615 netlink_overrun(sk);
1621 __set_current_state(TASK_INTERRUPTIBLE);
1622 add_wait_queue(&nlk->wait, &wait);
1624 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1625 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1626 !sock_flag(sk, SOCK_DEAD))
1627 *timeo = schedule_timeout(*timeo);
1629 __set_current_state(TASK_RUNNING);
1630 remove_wait_queue(&nlk->wait, &wait);
1633 if (signal_pending(current)) {
1635 return sock_intr_errno(*timeo);
1639 netlink_skb_set_owner_r(skb, sk);
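/* Caller-side sketch (illustration, not part of this file) of the retry
 * contract documented above, modeled on the ipc/mqueue.c usage: a return
 * of 1 means the socket reference was dropped while waiting for memory,
 * so the lookup has to be repeated from scratch.
 */
#if 0	/* illustration only */
	struct sock *sk;
	long timeo = sock_sndtimeo(ssk, 0);
	int err;

retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;	/* reference dropped: look the socket up again */
	if (err < 0)
		return err;	/* skb already freed, reference dropped */
	return netlink_sendskb(sk, skb);	/* consumes the skb, returns len */
#endif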
1643 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1647 netlink_deliver_tap(skb);
1649 #ifdef CONFIG_NETLINK_MMAP
1650 if (netlink_skb_is_mmaped(skb))
1651 netlink_queue_mmaped_skb(sk, skb);
1652 else if (netlink_rx_is_mmaped(sk))
1653 netlink_ring_set_copied(sk, skb);
1655 #endif /* CONFIG_NETLINK_MMAP */
1656 skb_queue_tail(&sk->sk_receive_queue, skb);
1657 sk->sk_data_ready(sk, len);
1661 int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1663 int len = __netlink_sendskb(sk, skb);
1669 void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1675 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1679 WARN_ON(skb->sk != NULL);
1680 if (netlink_skb_is_mmaped(skb))
1683 delta = skb->end - skb->tail;
1684 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1687 if (skb_shared(skb)) {
1688 struct sk_buff *nskb = skb_clone(skb, allocation);
1695 if (!pskb_expand_head(skb, 0, -delta, allocation))
1696 skb->truesize -= delta;
1701 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1705 struct netlink_sock *nlk = nlk_sk(sk);
1707 ret = -ECONNREFUSED;
1708 if (nlk->netlink_rcv != NULL) {
1710 netlink_skb_set_owner_r(skb, sk);
1711 NETLINK_CB(skb).sk = ssk;
1712 netlink_deliver_tap_kernel(sk, ssk, skb);
1713 nlk->netlink_rcv(skb);
1722 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1723 u32 portid, int nonblock)
1729 skb = netlink_trim(skb, gfp_any());
1731 timeo = sock_sndtimeo(ssk, nonblock);
1733 sk = netlink_getsockbyportid(ssk, portid);
1738 if (netlink_is_kernel(sk))
1739 return netlink_unicast_kernel(sk, skb, ssk);
1741 if (sk_filter(sk, skb)) {
1748 err = netlink_attachskb(sk, skb, &timeo, ssk);
1754 return netlink_sendskb(sk, skb);
1756 EXPORT_SYMBOL(netlink_unicast);
1758 struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1759 u32 dst_portid, gfp_t gfp_mask)
1761 #ifdef CONFIG_NETLINK_MMAP
1762 struct sock *sk = NULL;
1763 struct sk_buff *skb;
1764 struct netlink_ring *ring;
1765 struct nl_mmap_hdr *hdr;
1766 unsigned int maxlen;
1768 sk = netlink_getsockbyportid(ssk, dst_portid);
1772 ring = &nlk_sk(sk)->rx_ring;
1773 /* fast-path without atomic ops for common case: non-mmaped receiver */
1774 if (ring->pg_vec == NULL)
1777 skb = alloc_skb_head(gfp_mask);
1781 spin_lock_bh(&sk->sk_receive_queue.lock);
1782 /* check again under lock */
1783 if (ring->pg_vec == NULL)
1786 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1790 netlink_forward_ring(ring);
1791 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1794 netlink_ring_setup_skb(skb, sk, ring, hdr);
1795 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1796 atomic_inc(&ring->pending);
1797 netlink_increment_head(ring);
1799 spin_unlock_bh(&sk->sk_receive_queue.lock);
1804 spin_unlock_bh(&sk->sk_receive_queue.lock);
1805 netlink_overrun(sk);
1812 spin_unlock_bh(&sk->sk_receive_queue.lock);
1817 return alloc_skb(size, gfp_mask);
1819 EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1821 int netlink_has_listeners(struct sock *sk, unsigned int group)
1824 struct listeners *listeners;
1826 BUG_ON(!netlink_is_kernel(sk));
1829 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1831 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
1832 res = test_bit(group - 1, listeners->masks);
1838 EXPORT_SYMBOL_GPL(netlink_has_listeners);
1840 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1842 struct netlink_sock *nlk = nlk_sk(sk);
1844 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1845 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
1846 netlink_skb_set_owner_r(skb, sk);
1847 __netlink_sendskb(sk, skb);
1848 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1853 struct netlink_broadcast_data {
1854 struct sock *exclude_sk;
1859 int delivery_failure;
1863 struct sk_buff *skb, *skb2;
1864 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1868 static int do_one_broadcast(struct sock *sk,
1869 struct netlink_broadcast_data *p)
1871 struct netlink_sock *nlk = nlk_sk(sk);
1874 if (p->exclude_sk == sk)
1877 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1878 !test_bit(p->group - 1, nlk->groups))
1881 if (!net_eq(sock_net(sk), p->net))
1885 netlink_overrun(sk);
1890 if (p->skb2 == NULL) {
1891 if (skb_shared(p->skb)) {
1892 p->skb2 = skb_clone(p->skb, p->allocation);
1894 p->skb2 = skb_get(p->skb);
1896 * skb ownership may have been set when
1897 * delivered to a previous socket.
1899 skb_orphan(p->skb2);
1902 if (p->skb2 == NULL) {
1903 netlink_overrun(sk);
1904 /* Clone failed. Notify ALL listeners. */
1906 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1907 p->delivery_failure = 1;
1908 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1911 } else if (sk_filter(sk, p->skb2)) {
1914 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1915 netlink_overrun(sk);
1916 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1917 p->delivery_failure = 1;
1919 p->congested |= val;
1929 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1930 u32 group, gfp_t allocation,
1931 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1934 struct net *net = sock_net(ssk);
1935 struct netlink_broadcast_data info;
1938 skb = netlink_trim(skb, allocation);
1940 info.exclude_sk = ssk;
1942 info.portid = portid;
1945 info.delivery_failure = 0;
1948 info.allocation = allocation;
1951 info.tx_filter = filter;
1952 info.tx_data = filter_data;
1954 /* While we sleep in clone, do not allow the socket list to change */
1956 netlink_lock_table();
1958 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1959 do_one_broadcast(sk, &info);
1963 netlink_unlock_table();
1965 if (info.delivery_failure) {
1966 kfree_skb(info.skb2);
1969 consume_skb(info.skb2);
1971 if (info.delivered) {
1972 if (info.congested && (allocation & __GFP_WAIT))
1978 EXPORT_SYMBOL(netlink_broadcast_filtered);
1980 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
1981 u32 group, gfp_t allocation)
1983 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
1986 EXPORT_SYMBOL(netlink_broadcast);
1988 struct netlink_set_err_data {
1989 struct sock *exclude_sk;
1995 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1997 struct netlink_sock *nlk = nlk_sk(sk);
2000 if (sk == p->exclude_sk)
2003 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
2006 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
2007 !test_bit(p->group - 1, nlk->groups))
2010 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2015 sk->sk_err = p->code;
2016 sk->sk_error_report(sk);
2022 * netlink_set_err - report error to broadcast listeners
2023 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
2024 * @portid: the PORTID of a process that we want to skip (if any)
2025 * @group: the broadcast group that will notice the error
2026 * @code: error code, must be negative (as usual in kernelspace)
2028 * This function returns the number of broadcast listeners that have set the
2029 * NETLINK_RECV_NO_ENOBUFS socket option.
2031 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
2033 struct netlink_set_err_data info;
2037 info.exclude_sk = ssk;
2038 info.portid = portid;
2040 /* sk->sk_err wants a positive error value */
2043 read_lock(&nl_table_lock);
2045 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
2046 ret += do_one_set_err(sk, &info);
2048 read_unlock(&nl_table_lock);
2051 EXPORT_SYMBOL(netlink_set_err);
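/* Usage sketch (illustration, not part of this file): a kernel socket
 * reporting an overrun to every listener of multicast group "group"; the
 * error code is negative as required above, and listeners that set
 * NETLINK_NO_ENOBUFS are counted in the return value.
 */
#if 0	/* illustration only */
	if (netlink_set_err(kernel_sk, 0, group, -ENOBUFS))
		pr_debug("some listeners opted out of ENOBUFS reports\n");
#endif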
2053 /* must be called with netlink table grabbed */
2054 static void netlink_update_socket_mc(struct netlink_sock *nlk,
2058 int old, new = !!is_new, subscriptions;
2060 old = test_bit(group - 1, nlk->groups);
2061 subscriptions = nlk->subscriptions - old + new;
2063 __set_bit(group - 1, nlk->groups);
2065 __clear_bit(group - 1, nlk->groups);
2066 netlink_update_subscriptions(&nlk->sk, subscriptions);
2067 netlink_update_listeners(&nlk->sk);
2070 static int netlink_setsockopt(struct socket *sock, int level, int optname,
2071 char __user *optval, unsigned int optlen)
2073 struct sock *sk = sock->sk;
2074 struct netlink_sock *nlk = nlk_sk(sk);
2075 unsigned int val = 0;
2078 if (level != SOL_NETLINK)
2079 return -ENOPROTOOPT;
2081 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2082 optlen >= sizeof(int) &&
2083 get_user(val, (unsigned int __user *)optval))
2087 case NETLINK_PKTINFO:
2089 nlk->flags |= NETLINK_RECV_PKTINFO;
2091 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2094 case NETLINK_ADD_MEMBERSHIP:
2095 case NETLINK_DROP_MEMBERSHIP: {
2096 if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
2098 err = netlink_realloc_groups(sk);
2101 if (!val || val - 1 >= nlk->ngroups)
2103 netlink_table_grab();
2104 netlink_update_socket_mc(nlk, val,
2105 optname == NETLINK_ADD_MEMBERSHIP);
2106 netlink_table_ungrab();
2108 if (nlk->netlink_bind)
2109 nlk->netlink_bind(val);
2114 case NETLINK_BROADCAST_ERROR:
2116 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2118 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2121 case NETLINK_NO_ENOBUFS:
2123 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
2124 clear_bit(NETLINK_CONGESTED, &nlk->state);
2125 wake_up_interruptible(&nlk->wait);
2127 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
2131 #ifdef CONFIG_NETLINK_MMAP
2132 case NETLINK_RX_RING:
2133 case NETLINK_TX_RING: {
2134 struct nl_mmap_req req;
2136 /* Rings might consume more memory than queue limits, require
2139 if (!capable(CAP_NET_ADMIN))
2141 if (optlen < sizeof(req))
2143 if (copy_from_user(&req, optval, sizeof(req)))
2145 err = netlink_set_ring(sk, &req, false,
2146 optname == NETLINK_TX_RING);
2149 #endif /* CONFIG_NETLINK_MMAP */
2156 static int netlink_getsockopt(struct socket *sock, int level, int optname,
2157 char __user *optval, int __user *optlen)
2159 struct sock *sk = sock->sk;
2160 struct netlink_sock *nlk = nlk_sk(sk);
2163 if (level != SOL_NETLINK)
2164 return -ENOPROTOOPT;
2166 if (get_user(len, optlen))
2172 case NETLINK_PKTINFO:
2173 if (len < sizeof(int))
2176 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
2177 if (put_user(len, optlen) ||
2178 put_user(val, optval))
2182 case NETLINK_BROADCAST_ERROR:
2183 if (len < sizeof(int))
2186 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2187 if (put_user(len, optlen) ||
2188 put_user(val, optval))
2192 case NETLINK_NO_ENOBUFS:
2193 if (len < sizeof(int))
2196 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2197 if (put_user(len, optlen) ||
2198 put_user(val, optval))
2208 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2210 struct nl_pktinfo info;
2212 info.group = NETLINK_CB(skb).dst_group;
2213 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2216 static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2217 struct msghdr *msg, size_t len)
2219 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2220 struct sock *sk = sock->sk;
2221 struct netlink_sock *nlk = nlk_sk(sk);
2222 struct sockaddr_nl *addr = msg->msg_name;
2225 struct sk_buff *skb;
2227 struct scm_cookie scm;
2229 if (msg->msg_flags&MSG_OOB)
2232 if (NULL == siocb->scm)
2235 err = scm_send(sock, msg, siocb->scm, true);
2239 if (msg->msg_namelen) {
2241 if (addr->nl_family != AF_NETLINK)
2243 dst_portid = addr->nl_pid;
2244 dst_group = ffs(addr->nl_groups);
2246 if ((dst_group || dst_portid) &&
2247 !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
2250 dst_portid = nlk->dst_portid;
2251 dst_group = nlk->dst_group;
2255 err = netlink_autobind(sock);
2260 if (netlink_tx_is_mmaped(sk) &&
2261 msg->msg_iov->iov_base == NULL) {
2262 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2268 if (len > sk->sk_sndbuf - 32)
2271 skb = netlink_alloc_large_skb(len, dst_group);
2275 NETLINK_CB(skb).portid = nlk->portid;
2276 NETLINK_CB(skb).dst_group = dst_group;
2277 NETLINK_CB(skb).creds = siocb->scm->creds;
2280 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2285 err = security_netlink_send(sk, skb);
2292 atomic_inc(&skb->users);
2293 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
2295 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
2298 scm_destroy(siocb->scm);
2302 static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2303 struct msghdr *msg, size_t len,
2306 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2307 struct scm_cookie scm;
2308 struct sock *sk = sock->sk;
2309 struct netlink_sock *nlk = nlk_sk(sk);
2310 int noblock = flags&MSG_DONTWAIT;
2312 struct sk_buff *skb, *data_skb;
2320 skb = skb_recv_datagram(sk, flags, noblock, &err);
2326 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2327 if (unlikely(skb_shinfo(skb)->frag_list)) {
2329 * If this skb has a frag_list, then here that means that we
2330 * will have to use the frag_list skb's data for compat tasks
2331 * and the regular skb's data for normal (non-compat) tasks.
2333 * If we need to send the compat skb, assign it to the
2334 * 'data_skb' variable so that it will be used below for data
2335 * copying. We keep 'skb' for everything else, including
2336 * freeing both later.
2338 if (flags & MSG_CMSG_COMPAT)
2339 data_skb = skb_shinfo(skb)->frag_list;
2343 copied = data_skb->len;
2345 msg->msg_flags |= MSG_TRUNC;
2349 skb_reset_transport_header(data_skb);
2350 err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
2352 if (msg->msg_name) {
2353 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
2354 addr->nl_family = AF_NETLINK;
2356 addr->nl_pid = NETLINK_CB(skb).portid;
2357 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
2358 msg->msg_namelen = sizeof(*addr);
2361 if (nlk->flags & NETLINK_RECV_PKTINFO)
2362 netlink_cmsg_recv_pktinfo(msg, skb);
2364 if (NULL == siocb->scm) {
2365 memset(&scm, 0, sizeof(scm));
2368 siocb->scm->creds = *NETLINK_CREDS(skb);
2369 if (flags & MSG_TRUNC)
2370 copied = data_skb->len;
2372 skb_free_datagram(sk, skb);
2374 if (nlk->cb_running &&
2375 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2376 ret = netlink_dump(sk);
2379 sk->sk_error_report(sk);
2383 scm_recv(sock, msg, siocb->scm, flags);
2385 netlink_rcv_wake(sk);
2386 return err ? : copied;
2389 static void netlink_data_ready(struct sock *sk, int len)
2395 * We export these functions to other modules. They provide a
2396 * complete set of kernel non-blocking support for message
2401 __netlink_kernel_create(struct net *net, int unit, struct module *module,
2402 struct netlink_kernel_cfg *cfg)
2404 struct socket *sock;
2406 struct netlink_sock *nlk;
2407 struct listeners *listeners = NULL;
2408 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2409 unsigned int groups;
2413 if (unit < 0 || unit >= MAX_LINKS)
2416 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2420 * We just need a reference on the net from sk, but we don't want to
2421 * get_net() it. Besides, we cannot get and then put the net here.
2422 * So we create one inside init_net and then move it to net.
2425 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2426 goto out_sock_release_nosk;
2429 sk_change_net(sk, net);
2431 if (!cfg || cfg->groups < 32)
2434 groups = cfg->groups;
2436 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
2438 goto out_sock_release;
2440 sk->sk_data_ready = netlink_data_ready;
2441 if (cfg && cfg->input)
2442 nlk_sk(sk)->netlink_rcv = cfg->input;
2444 if (netlink_insert(sk, net, 0))
2445 goto out_sock_release;
2448 nlk->flags |= NETLINK_KERNEL_SOCKET;
2450 netlink_table_grab();
2451 if (!nl_table[unit].registered) {
2452 nl_table[unit].groups = groups;
2453 rcu_assign_pointer(nl_table[unit].listeners, listeners);
2454 nl_table[unit].cb_mutex = cb_mutex;
2455 nl_table[unit].module = module;
2457 nl_table[unit].bind = cfg->bind;
2458 nl_table[unit].flags = cfg->flags;
2460 nl_table[unit].compare = cfg->compare;
2462 nl_table[unit].registered = 1;
2465 nl_table[unit].registered++;
2467 netlink_table_ungrab();
2472 netlink_kernel_release(sk);
2475 out_sock_release_nosk:
2479 EXPORT_SYMBOL(__netlink_kernel_create);
2482 netlink_kernel_release(struct sock *sk)
2484 sk_release_kernel(sk);
2486 EXPORT_SYMBOL(netlink_kernel_release);
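/* Module-side sketch (illustration, not part of this file): the usual way
 * a subsystem creates its kernel socket is the netlink_kernel_create()
 * inline from <linux/netlink.h>, which passes THIS_MODULE to
 * __netlink_kernel_create(). The protocol number and names here are
 * placeholders; a real subsystem uses its own NETLINK_* id.
 */
#if 0	/* illustration only */
static struct sock *example_sk;

static void example_input(struct sk_buff *skb)
{
	/* runs in the sending process' context; see netlink_rcv_skb() below */
}

static int __init example_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= example_input,
	};

	example_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
	return example_sk ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	netlink_kernel_release(example_sk);
}
#endif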
2488 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2490 struct listeners *new, *old;
2491 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2496 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2497 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2500 old = nl_deref_protected(tbl->listeners);
2501 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2502 rcu_assign_pointer(tbl->listeners, new);
2504 kfree_rcu(old, rcu);
2506 tbl->groups = groups;
2512 * netlink_change_ngroups - change number of multicast groups
2514 * This changes the number of multicast groups that are available
2515 * on a certain netlink family. Note that it is not possible to
2516 * change the number of groups to below 32. Also note that it does
2517 * not implicitly call netlink_clear_multicast_users() when the
2518 * number of groups is reduced.
2520 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2521 * @groups: The new number of groups.
2523 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2527 netlink_table_grab();
2528 err = __netlink_change_ngroups(sk, groups);
2529 netlink_table_ungrab();
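/* Usage sketch (illustration, not part of this file): a family that
 * registers additional multicast groups at runtime, as generic netlink
 * does, grows the listener bitmaps with this call.
 */
#if 0	/* illustration only */
	err = netlink_change_ngroups(kernel_sk, new_group_count);
#endif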
2534 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2537 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2539 sk_for_each_bound(sk, &tbl->mc_list)
2540 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2544 * netlink_clear_multicast_users - remove all listeners from a multicast group
2546 * This function removes all listeners from the given group.
2547 * @ksk: The kernel netlink socket, as returned by
2548 * netlink_kernel_create().
2549 * @group: The multicast group to clear.
2551 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2553 netlink_table_grab();
2554 __netlink_clear_multicast_users(ksk, group);
2555 netlink_table_ungrab();
2559 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2561 struct nlmsghdr *nlh;
2562 int size = nlmsg_msg_size(len);
2564 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2565 nlh->nlmsg_type = type;
2566 nlh->nlmsg_len = size;
2567 nlh->nlmsg_flags = flags;
2568 nlh->nlmsg_pid = portid;
2569 nlh->nlmsg_seq = seq;
2570 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2571 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2574 EXPORT_SYMBOL(__nlmsg_put);
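/* Builder sketch (illustration, not part of this file): protocol code
 * normally reaches __nlmsg_put() through the nlmsg_put() inline from
 * <net/netlink.h>, which additionally checks tailroom and returns NULL
 * when the aligned header plus payload does not fit.
 */
#if 0	/* illustration only */
	struct nlmsghdr *nlh;
	u32 *payload;

	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, sizeof(*payload), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	payload = nlmsg_data(nlh);
	*payload = 0;
#endif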
2577 * It looks a bit ugly.
2578 * It would be better to create a kernel thread.
2581 static int netlink_dump(struct sock *sk)
2583 struct netlink_sock *nlk = nlk_sk(sk);
2584 struct netlink_callback *cb;
2585 struct sk_buff *skb = NULL;
2586 struct nlmsghdr *nlh;
2587 int len, err = -ENOBUFS;
2590 mutex_lock(nlk->cb_mutex);
2591 if (!nlk->cb_running) {
2597 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2599 if (!netlink_rx_is_mmaped(sk) &&
2600 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2602 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
2605 netlink_skb_set_owner_r(skb, sk);
2607 len = cb->dump(skb, cb);
2610 mutex_unlock(nlk->cb_mutex);
2612 if (sk_filter(sk, skb))
2615 __netlink_sendskb(sk, skb);
2619 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2623 nl_dump_check_consistent(cb, nlh);
2625 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2627 if (sk_filter(sk, skb))
2630 __netlink_sendskb(sk, skb);
2635 nlk->cb_running = false;
2636 mutex_unlock(nlk->cb_mutex);
2637 module_put(cb->module);
2638 consume_skb(cb->skb);
2642 mutex_unlock(nlk->cb_mutex);
2647 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2648 const struct nlmsghdr *nlh,
2649 struct netlink_dump_control *control)
2651 struct netlink_callback *cb;
2653 struct netlink_sock *nlk;
2656 /* Memory mapped dump requests need to be copied to avoid looping
2657 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2658 * a reference to the skb.
2660 if (netlink_skb_is_mmaped(skb)) {
2661 skb = skb_copy(skb, GFP_KERNEL);
2665 atomic_inc(&skb->users);
2667 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2669 ret = -ECONNREFUSED;
2674 mutex_lock(nlk->cb_mutex);
2675 /* A dump is in progress... */
2676 if (nlk->cb_running) {
2680 /* add reference of module which cb->dump belongs to */
2681 if (!try_module_get(control->module)) {
2682 ret = -EPROTONOSUPPORT;
2687 memset(cb, 0, sizeof(*cb));
2688 cb->dump = control->dump;
2689 cb->done = control->done;
2691 cb->data = control->data;
2692 cb->module = control->module;
2693 cb->min_dump_alloc = control->min_dump_alloc;
2696 nlk->cb_running = true;
2698 mutex_unlock(nlk->cb_mutex);
2700 ret = netlink_dump(sk);
2706 /* We successfully started a dump; by returning -EINTR we
2707 * signal that no ACK should be sent even if it was requested.
2713 mutex_unlock(nlk->cb_mutex);
2718 EXPORT_SYMBOL(__netlink_dump_start);
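/* Request-handler sketch (illustration, not part of this file): the
 * standard NLM_F_DUMP pattern via the netlink_dump_start() inline from
 * <linux/netlink.h>. The dump callback appends records and returns
 * skb->len to be called again, or 0 when finished; the -EINTR return
 * mentioned above suppresses the automatic ACK for the request.
 */
#if 0	/* illustration only */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* fill skb using state kept in cb->args[]; return skb->len to go on */
	return 0;
}

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = example_dump,
		};

		return netlink_dump_start(skb->sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif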
2720 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2722 struct sk_buff *skb;
2723 struct nlmsghdr *rep;
2724 struct nlmsgerr *errmsg;
2725 size_t payload = sizeof(*errmsg);
2727 /* error messages get the original request appended */
2729 payload += nlmsg_len(nlh);
2731 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2732 NETLINK_CB(in_skb).portid, GFP_KERNEL);
2736 sk = netlink_lookup(sock_net(in_skb->sk),
2737 in_skb->sk->sk_protocol,
2738 NETLINK_CB(in_skb).portid);
2740 sk->sk_err = ENOBUFS;
2741 sk->sk_error_report(sk);
2747 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2748 NLMSG_ERROR, payload, 0);
2749 errmsg = nlmsg_data(rep);
2750 errmsg->error = err;
2751 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
2752 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2754 EXPORT_SYMBOL(netlink_ack);
2756 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2759 struct nlmsghdr *nlh;
2762 while (skb->len >= nlmsg_total_size(0)) {
2765 nlh = nlmsg_hdr(skb);
2768 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2771 /* Only requests are handled by the kernel */
2772 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2775 /* Skip control messages */
2776 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2784 if (nlh->nlmsg_flags & NLM_F_ACK || err)
2785 netlink_ack(skb, nlh, err);
2788 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2789 if (msglen > skb->len)
2791 skb_pull(skb, msglen);
2796 EXPORT_SYMBOL(netlink_rcv_skb);
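/* Receive-path sketch (illustration, not part of this file): a kernel
 * socket's cfg.input callback typically just hands each skb to
 * netlink_rcv_skb() with a per-message handler, mirroring the
 * rtnetlink_rcv()/rtnetlink_rcv_msg() pairing in net/core/rtnetlink.c.
 */
#if 0	/* illustration only */
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	switch (nlh->nlmsg_type) {
	/* dispatch on message type; return 0 or a negative errno */
	default:
		return -EOPNOTSUPP;
	}
}

static void example_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}
#endif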
2799 * nlmsg_notify - send a notification netlink message
2800 * @sk: netlink socket to use
2801 * @skb: notification message
2802 * @portid: destination netlink portid for reports or 0
2803 * @group: destination multicast group or 0
2804 * @report: 1 to report back, 0 to disable
2805 * @flags: allocation flags
2807 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
2808 unsigned int group, int report, gfp_t flags)
2813 int exclude_portid = 0;
2816 atomic_inc(&skb->users);
2817 exclude_portid = portid;
2820 /* errors are reported via the destination sk->sk_err, but delivery
2821 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
2822 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
2828 err2 = nlmsg_unicast(sk, skb, portid);
2829 if (!err || err == -ESRCH)
2835 EXPORT_SYMBOL(nlmsg_notify);
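/* Notification sketch (illustration, not part of this file): rtnetlink-
 * style event reporting, multicasting to "group" and echoing back to the
 * requester's portid only when NLM_F_ECHO was set, as tested by the
 * nlmsg_report() inline from <net/netlink.h>.
 */
#if 0	/* illustration only */
	err = nlmsg_notify(kernel_sk, skb, requester_portid, group,
			   nlmsg_report(nlh), GFP_KERNEL);
#endif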
2837 #ifdef CONFIG_PROC_FS
2838 struct nl_seq_iter {
2839 struct seq_net_private p;
2844 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2846 struct nl_seq_iter *iter = seq->private;
2851 for (i = 0; i < MAX_LINKS; i++) {
2852 struct nl_portid_hash *hash = &nl_table[i].hash;
2854 for (j = 0; j <= hash->mask; j++) {
2855 sk_for_each(s, &hash->table[j]) {
2856 if (sock_net(s) != seq_file_net(seq))
2870 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
2871 __acquires(nl_table_lock)
2873 read_lock(&nl_table_lock);
2874 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2877 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2880 struct nl_seq_iter *iter;
2886 if (v == SEQ_START_TOKEN)
2887 return netlink_seq_socket_idx(seq, 0);
2889 net = seq_file_net(seq);
2890 iter = seq->private;
2894 } while (s && !nl_table[s->sk_protocol].compare(net, s));
2899 j = iter->hash_idx + 1;
2902 struct nl_portid_hash *hash = &nl_table[i].hash;
2904 for (; j <= hash->mask; j++) {
2905 s = sk_head(&hash->table[j]);
2907 while (s && !nl_table[s->sk_protocol].compare(net, s))
2917 } while (++i < MAX_LINKS);
2922 static void netlink_seq_stop(struct seq_file *seq, void *v)
2923 __releases(nl_table_lock)
2925 read_unlock(&nl_table_lock);
2929 static int netlink_seq_show(struct seq_file *seq, void *v)
2931 if (v == SEQ_START_TOKEN) {
2933 "sk Eth Pid Groups "
2934 "Rmem Wmem Dump Locks Drops Inode\n");
2937 struct netlink_sock *nlk = nlk_sk(s);
2939 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
2943 nlk->groups ? (u32)nlk->groups[0] : 0,
2944 sk_rmem_alloc_get(s),
2945 sk_wmem_alloc_get(s),
2947 atomic_read(&s->sk_refcnt),
2948 atomic_read(&s->sk_drops),
2956 static const struct seq_operations netlink_seq_ops = {
2957 .start = netlink_seq_start,
2958 .next = netlink_seq_next,
2959 .stop = netlink_seq_stop,
2960 .show = netlink_seq_show,
2964 static int netlink_seq_open(struct inode *inode, struct file *file)
2966 return seq_open_net(inode, file, &netlink_seq_ops,
2967 sizeof(struct nl_seq_iter));
2970 static const struct file_operations netlink_seq_fops = {
2971 .owner = THIS_MODULE,
2972 .open = netlink_seq_open,
2974 .llseek = seq_lseek,
2975 .release = seq_release_net,
2980 int netlink_register_notifier(struct notifier_block *nb)
2982 return atomic_notifier_chain_register(&netlink_chain, nb);
2984 EXPORT_SYMBOL(netlink_register_notifier);
2986 int netlink_unregister_notifier(struct notifier_block *nb)
2988 return atomic_notifier_chain_unregister(&netlink_chain, nb);
2990 EXPORT_SYMBOL(netlink_unregister_notifier);
2992 static const struct proto_ops netlink_ops = {
2993 .family = PF_NETLINK,
2994 .owner = THIS_MODULE,
2995 .release = netlink_release,
2996 .bind = netlink_bind,
2997 .connect = netlink_connect,
2998 .socketpair = sock_no_socketpair,
2999 .accept = sock_no_accept,
3000 .getname = netlink_getname,
3001 .poll = netlink_poll,
3002 .ioctl = sock_no_ioctl,
3003 .listen = sock_no_listen,
3004 .shutdown = sock_no_shutdown,
3005 .setsockopt = netlink_setsockopt,
3006 .getsockopt = netlink_getsockopt,
3007 .sendmsg = netlink_sendmsg,
3008 .recvmsg = netlink_recvmsg,
3009 .mmap = netlink_mmap,
3010 .sendpage = sock_no_sendpage,
3013 static const struct net_proto_family netlink_family_ops = {
3014 .family = PF_NETLINK,
3015 .create = netlink_create,
3016 .owner = THIS_MODULE, /* for consistency 8) */
3019 static int __net_init netlink_net_init(struct net *net)
3021 #ifdef CONFIG_PROC_FS
3022 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3028 static void __net_exit netlink_net_exit(struct net *net)
3030 #ifdef CONFIG_PROC_FS
3031 remove_proc_entry("netlink", net->proc_net);
3035 static void __init netlink_add_usersock_entry(void)
3037 struct listeners *listeners;
3040 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3042 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3044 netlink_table_grab();
3046 nl_table[NETLINK_USERSOCK].groups = groups;
3047 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3048 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3049 nl_table[NETLINK_USERSOCK].registered = 1;
3050 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3052 netlink_table_ungrab();
3055 static struct pernet_operations __net_initdata netlink_net_ops = {
3056 .init = netlink_net_init,
3057 .exit = netlink_net_exit,
3060 static int __init netlink_proto_init(void)
3063 unsigned long limit;
3065 int err = proto_register(&netlink_proto, 0);
3070 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3072 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3076 if (totalram_pages >= (128 * 1024))
3077 limit = totalram_pages >> (21 - PAGE_SHIFT);
3079 limit = totalram_pages >> (23 - PAGE_SHIFT);
3081 order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
3082 limit = (1UL << order) / sizeof(struct hlist_head);
3083 order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
3085 for (i = 0; i < MAX_LINKS; i++) {
3086 struct nl_portid_hash *hash = &nl_table[i].hash;
3088 hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
3091 nl_portid_hash_free(nl_table[i].hash.table,
3092 1 * sizeof(*hash->table));
3096 hash->max_shift = order;
3099 hash->rehash_time = jiffies;
3101 nl_table[i].compare = netlink_compare;
3104 INIT_LIST_HEAD(&netlink_tap_all);
3106 netlink_add_usersock_entry();
3108 sock_register(&netlink_family_ops);
3109 register_pernet_subsys(&netlink_net_ops);
3110 /* The netlink device handler may be needed early. */
3115 panic("netlink_init: Cannot allocate nl_table\n");
3118 core_initcall(netlink_proto_init);