2 * NET4: Implementation of BSD Unix domain sockets.
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * BSD code.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting of socket.
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid huge amounts
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skbs queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
53 * Known differences from reference BSD that was tested:
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and gives the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with the pathname (filesystem) name space.
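/*
 * Illustrative sketch (not part of the original file): binding to an
 * abstract name from userspace, assuming the usual <sys/socket.h> and
 * <sys/un.h> API; the name "demo" is made up.  The leading NUL byte
 * selects the abstract name space, and the passed length must cover
 * exactly the bytes of the name, since abstract names are not zero
 * terminated.
 *
 *	struct sockaddr_un sun;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	sun.sun_path[0] = '\0';                // abstract name space
 *	memcpy(sun.sun_path + 1, "demo", 4);   // name is "\0demo"
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */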
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
118 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119 static DEFINE_SPINLOCK(unix_table_lock);
120 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
122 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
124 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
126 #ifdef CONFIG_SECURITY_NETWORK
127 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
132 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
134 scm->secid = *UNIXSID(skb);
137 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
140 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
142 #endif /* CONFIG_SECURITY_NETWORK */
145 * SMP locking strategy:
146 * the hash table is protected by the spinlock unix_table_lock;
147 * each socket's state is protected by a separate spin lock.
150 static inline unsigned unix_hash_fold(__wsum n)
152 unsigned hash = (__force unsigned)n;
155 return hash&(UNIX_HASH_SIZE-1);
158 #define unix_peer(sk) (unix_sk(sk)->peer)
160 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
162 return unix_peer(osk) == sk;
165 static inline int unix_may_send(struct sock *sk, struct sock *osk)
167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
170 static inline int unix_recvq_full(struct sock const *sk)
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
175 static struct sock *unix_peer_get(struct sock *s)
183 unix_state_unlock(s);
187 static inline void unix_release_addr(struct unix_address *addr)
189 if (atomic_dec_and_test(&addr->refcnt))
194 * Check unix socket name:
195 * - should not be zero length.
196 * - if it does not start with zero, it should be NULL terminated (FS object)
197 * - if it starts with zero, it is an abstract name.
200 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
206 if (sunaddr->sun_path[0]) {
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However, in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
214 ((char *)sunaddr)[len] = 0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
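/*
 * Hedged userspace sketch of the filesystem-name case handled above (the
 * path is only an example): the caller supplies a NUL terminated sun_path,
 * and the length is recomputed here from strlen(), so padding in the
 * address length passed by the caller does not matter.
 *
 *	struct sockaddr_un sun;
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	strncpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path) - 1);
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sun));   // accepted
 */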
223 static void __unix_remove_socket(struct sock *sk)
225 sk_del_node_init(sk);
228 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
234 static inline void unix_remove_socket(struct sock *sk)
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
241 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
248 static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
253 struct hlist_node *node;
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
258 if (!net_eq(sock_net(s), net))
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
270 static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
281 spin_unlock(&unix_table_lock);
285 static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
288 struct hlist_node *node;
290 spin_lock(&unix_table_lock);
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
295 if (!net_eq(sock_net(s), net))
298 if (dentry && dentry->d_inode == i) {
305 spin_unlock(&unix_table_lock);
309 static inline int unix_writable(struct sock *sk)
311 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
314 static void unix_write_space(struct sock *sk)
316 struct socket_wq *wq;
319 if (unix_writable(sk)) {
320 wq = rcu_dereference(sk->sk_wq);
321 if (wq_has_sleeper(wq))
322 wake_up_interruptible_sync(&wq->wait);
323 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
328 /* When a dgram socket disconnects (or changes its peer), we clear its receive
329 * queue of packets that arrived from the previous peer. First, this allows us to
330 * do flow control based only on wmem_alloc; second, an sk connected to a peer
331 * may receive messages only from that peer. */
332 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
334 if (!skb_queue_empty(&sk->sk_receive_queue)) {
335 skb_queue_purge(&sk->sk_receive_queue);
336 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
338 /* If one link of a bidirectional dgram pipe is disconnected,
339 * we signal an error. Messages are lost. Do not do this
340 * when the peer was not connected to us.
342 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
343 other->sk_err = ECONNRESET;
344 other->sk_error_report(other);
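/*
 * Hedged userspace illustration of the behaviour above (socket fds a and b
 * and the addresses are made up): when a re-connects elsewhere, datagrams
 * that b had queued to it are dropped, and b, which had connected back to
 * a, sees the error on its next receive (or POLLERR from poll()).
 *
 *	connect(a, (struct sockaddr *)&addr_b, blen);  // a -> b
 *	connect(b, (struct sockaddr *)&addr_a, alen);  // b -> a
 *	send(b, "x", 1, 0);                            // queued on a
 *	connect(a, (struct sockaddr *)&addr_c, clen);  // a's queue purged, "x" lost
 *	recv(b, buf, 1, 0);                            // -1, errno == ECONNRESET
 */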
349 static void unix_sock_destructor(struct sock *sk)
351 struct unix_sock *u = unix_sk(sk);
353 skb_queue_purge(&sk->sk_receive_queue);
355 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
356 WARN_ON(!sk_unhashed(sk));
357 WARN_ON(sk->sk_socket);
358 if (!sock_flag(sk, SOCK_DEAD)) {
359 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
364 unix_release_addr(u->addr);
366 atomic_dec(&unix_nr_socks);
368 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
370 #ifdef UNIX_REFCNT_DEBUG
371 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
372 atomic_read(&unix_nr_socks));
376 static int unix_release_sock(struct sock *sk, int embrion)
378 struct unix_sock *u = unix_sk(sk);
379 struct dentry *dentry;
380 struct vfsmount *mnt;
385 unix_remove_socket(sk);
390 sk->sk_shutdown = SHUTDOWN_MASK;
395 state = sk->sk_state;
396 sk->sk_state = TCP_CLOSE;
397 unix_state_unlock(sk);
399 wake_up_interruptible_all(&u->peer_wait);
401 skpair = unix_peer(sk);
403 if (skpair != NULL) {
404 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
405 unix_state_lock(skpair);
407 skpair->sk_shutdown = SHUTDOWN_MASK;
408 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
409 skpair->sk_err = ECONNRESET;
410 unix_state_unlock(skpair);
411 skpair->sk_state_change(skpair);
412 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
414 sock_put(skpair); /* It may now die */
415 unix_peer(sk) = NULL;
418 /* Try to flush out this socket. Throw out buffers at least */
420 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
421 if (state == TCP_LISTEN)
422 unix_release_sock(skb->sk, 1);
423 /* passed fds are erased in the kfree_skb hook */
434 /* ---- Socket is dead now and most probably destroyed ---- */
437 * Fixme: BSD difference: In BSD all sockets connected to us get
438 * ECONNRESET and we die on the spot. In Linux we behave
439 * like files and pipes do and wait for the last
440 * dereference.
442 * Can't we simply set sock->err?
444 * What is the above comment talking about? --ANK(980817)
447 if (unix_tot_inflight)
448 unix_gc(); /* Garbage collect fds */
453 static int unix_listen(struct socket *sock, int backlog)
456 struct sock *sk = sock->sk;
457 struct unix_sock *u = unix_sk(sk);
460 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
461 goto out; /* Only stream/seqpacket sockets accept */
464 goto out; /* No listens on an unbound socket */
466 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
468 if (backlog > sk->sk_max_ack_backlog)
469 wake_up_interruptible_all(&u->peer_wait);
470 sk->sk_max_ack_backlog = backlog;
471 sk->sk_state = TCP_LISTEN;
472 /* set credentials so connect can copy them */
473 sk->sk_peercred.pid = task_tgid_vnr(current);
474 current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
478 unix_state_unlock(sk);
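/*
 * Hedged userspace sketch (standard getsockopt(); struct ucred needs
 * _GNU_SOURCE with glibc): the credentials recorded above on the listening
 * socket are what a connecting client reads back with SO_PEERCRED.
 *
 *	struct ucred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	if (getsockopt(clientfd, SOL_SOCKET, SO_PEERCRED, &cr, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n", cr.pid, cr.uid, cr.gid);
 */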
483 static int unix_release(struct socket *);
484 static int unix_bind(struct socket *, struct sockaddr *, int);
485 static int unix_stream_connect(struct socket *, struct sockaddr *,
486 int addr_len, int flags);
487 static int unix_socketpair(struct socket *, struct socket *);
488 static int unix_accept(struct socket *, struct socket *, int);
489 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
490 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
491 static unsigned int unix_dgram_poll(struct file *, struct socket *,
493 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
494 static int unix_shutdown(struct socket *, int);
495 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
496 struct msghdr *, size_t);
497 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
498 struct msghdr *, size_t, int);
499 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
500 struct msghdr *, size_t);
501 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
502 struct msghdr *, size_t, int);
503 static int unix_dgram_connect(struct socket *, struct sockaddr *,
505 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
506 struct msghdr *, size_t);
508 static const struct proto_ops unix_stream_ops = {
510 .owner = THIS_MODULE,
511 .release = unix_release,
513 .connect = unix_stream_connect,
514 .socketpair = unix_socketpair,
515 .accept = unix_accept,
516 .getname = unix_getname,
519 .listen = unix_listen,
520 .shutdown = unix_shutdown,
521 .setsockopt = sock_no_setsockopt,
522 .getsockopt = sock_no_getsockopt,
523 .sendmsg = unix_stream_sendmsg,
524 .recvmsg = unix_stream_recvmsg,
525 .mmap = sock_no_mmap,
526 .sendpage = sock_no_sendpage,
529 static const struct proto_ops unix_dgram_ops = {
531 .owner = THIS_MODULE,
532 .release = unix_release,
534 .connect = unix_dgram_connect,
535 .socketpair = unix_socketpair,
536 .accept = sock_no_accept,
537 .getname = unix_getname,
538 .poll = unix_dgram_poll,
540 .listen = sock_no_listen,
541 .shutdown = unix_shutdown,
542 .setsockopt = sock_no_setsockopt,
543 .getsockopt = sock_no_getsockopt,
544 .sendmsg = unix_dgram_sendmsg,
545 .recvmsg = unix_dgram_recvmsg,
546 .mmap = sock_no_mmap,
547 .sendpage = sock_no_sendpage,
550 static const struct proto_ops unix_seqpacket_ops = {
552 .owner = THIS_MODULE,
553 .release = unix_release,
555 .connect = unix_stream_connect,
556 .socketpair = unix_socketpair,
557 .accept = unix_accept,
558 .getname = unix_getname,
559 .poll = unix_dgram_poll,
561 .listen = unix_listen,
562 .shutdown = unix_shutdown,
563 .setsockopt = sock_no_setsockopt,
564 .getsockopt = sock_no_getsockopt,
565 .sendmsg = unix_seqpacket_sendmsg,
566 .recvmsg = unix_dgram_recvmsg,
567 .mmap = sock_no_mmap,
568 .sendpage = sock_no_sendpage,
571 static struct proto unix_proto = {
573 .owner = THIS_MODULE,
574 .obj_size = sizeof(struct unix_sock),
578 * AF_UNIX sockets do not interact with hardware, hence they
579 * don't trigger interrupts - so it's safe for them to have
580 * bh-unsafe locking for their sk_receive_queue.lock. Split off
581 * this special lock-class by reinitializing the spinlock key:
583 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
585 static struct sock *unix_create1(struct net *net, struct socket *sock)
587 struct sock *sk = NULL;
590 atomic_inc(&unix_nr_socks);
591 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
594 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
598 sock_init_data(sock, sk);
599 lockdep_set_class(&sk->sk_receive_queue.lock,
600 &af_unix_sk_receive_queue_lock_key);
602 sk->sk_write_space = unix_write_space;
603 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
604 sk->sk_destruct = unix_sock_destructor;
608 spin_lock_init(&u->lock);
609 atomic_long_set(&u->inflight, 0);
610 INIT_LIST_HEAD(&u->link);
611 mutex_init(&u->readlock); /* single task reading lock */
612 init_waitqueue_head(&u->peer_wait);
613 unix_insert_socket(unix_sockets_unbound, sk);
616 atomic_dec(&unix_nr_socks);
619 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
625 static int unix_create(struct net *net, struct socket *sock, int protocol,
628 if (protocol && protocol != PF_UNIX)
629 return -EPROTONOSUPPORT;
631 sock->state = SS_UNCONNECTED;
633 switch (sock->type) {
635 sock->ops = &unix_stream_ops;
638 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
639 * nothing uses it.
642 sock->type = SOCK_DGRAM;
644 sock->ops = &unix_dgram_ops;
647 sock->ops = &unix_seqpacket_ops;
650 return -ESOCKTNOSUPPORT;
653 return unix_create1(net, sock) ? 0 : -ENOMEM;
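/*
 * Hedged userspace sketch of the type dispatch above: each supported type
 * gets its own ops table, and a SOCK_RAW request is quietly remapped to
 * SOCK_DGRAM.
 *
 *	int s1 = socket(AF_UNIX, SOCK_STREAM, 0);     // unix_stream_ops
 *	int s2 = socket(AF_UNIX, SOCK_DGRAM, 0);      // unix_dgram_ops
 *	int s3 = socket(AF_UNIX, SOCK_SEQPACKET, 0);  // unix_seqpacket_ops
 *	int s4 = socket(AF_UNIX, SOCK_RAW, 0);        // behaves as SOCK_DGRAM
 */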
656 static int unix_release(struct socket *sock)
658 struct sock *sk = sock->sk;
665 return unix_release_sock(sk, 0);
668 static int unix_autobind(struct socket *sock)
670 struct sock *sk = sock->sk;
671 struct net *net = sock_net(sk);
672 struct unix_sock *u = unix_sk(sk);
673 static u32 ordernum = 1;
674 struct unix_address *addr;
676 unsigned int retries = 0;
678 mutex_lock(&u->readlock);
685 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
689 addr->name->sun_family = AF_UNIX;
690 atomic_set(&addr->refcnt, 1);
693 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
694 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
696 spin_lock(&unix_table_lock);
697 ordernum = (ordernum+1)&0xFFFFF;
699 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
701 spin_unlock(&unix_table_lock);
703 * __unix_find_socket_byname() may take long time if many names
704 * are already in use.
707 /* Give up if all names seem to be in use. */
708 if (retries++ == 0xFFFFF) {
715 addr->hash ^= sk->sk_type;
717 __unix_remove_socket(sk);
719 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
720 spin_unlock(&unix_table_lock);
723 out: mutex_unlock(&u->readlock);
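/*
 * Hedged userspace sketch of autobind (variable names made up): passing an
 * address that contains only the family makes the kernel pick an abstract
 * name of the form "\0" followed by five hex digits, which getsockname()
 * then reports.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(sa_family_t);
 *
 *	bind(fd, (struct sockaddr *)&sun, len);           // triggers autobind
 *	len = sizeof(sun);
 *	getsockname(fd, (struct sockaddr *)&sun, &len);
 *	// sun.sun_path[0] == '\0', then e.g. "00a1b" (example value only)
 */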
727 static struct sock *unix_find_other(struct net *net,
728 struct sockaddr_un *sunname, int len,
729 int type, unsigned hash, int *error)
735 if (sunname->sun_path[0]) {
737 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
740 inode = path.dentry->d_inode;
741 err = inode_permission(inode, MAY_WRITE);
746 if (!S_ISSOCK(inode->i_mode))
748 u = unix_find_socket_byinode(net, inode);
752 if (u->sk_type == type)
753 touch_atime(path.mnt, path.dentry);
758 if (u->sk_type != type) {
764 u = unix_find_socket_byname(net, sunname, len, type, hash);
766 struct dentry *dentry;
767 dentry = unix_sk(u)->dentry;
769 touch_atime(unix_sk(u)->mnt, dentry);
783 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
785 struct sock *sk = sock->sk;
786 struct net *net = sock_net(sk);
787 struct unix_sock *u = unix_sk(sk);
788 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
789 struct dentry *dentry = NULL;
793 struct unix_address *addr;
794 struct hlist_head *list;
797 if (sunaddr->sun_family != AF_UNIX)
800 if (addr_len == sizeof(short)) {
801 err = unix_autobind(sock);
805 err = unix_mkname(sunaddr, addr_len, &hash);
810 mutex_lock(&u->readlock);
817 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
821 memcpy(addr->name, sunaddr, addr_len);
822 addr->len = addr_len;
823 addr->hash = hash ^ sk->sk_type;
824 atomic_set(&addr->refcnt, 1);
826 if (sunaddr->sun_path[0]) {
830 * Get the parent directory, calculate the hash for last
833 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
835 goto out_mknod_parent;
837 dentry = lookup_create(&nd, 0);
838 err = PTR_ERR(dentry);
840 goto out_mknod_unlock;
843 * All right, let's create it.
846 (SOCK_INODE(sock)->i_mode & ~current_umask());
847 err = mnt_want_write(nd.path.mnt);
850 err = security_path_mknod(&nd.path, dentry, mode, 0);
852 goto out_mknod_drop_write;
853 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
854 out_mknod_drop_write:
855 mnt_drop_write(nd.path.mnt);
858 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
859 dput(nd.path.dentry);
860 nd.path.dentry = dentry;
862 addr->hash = UNIX_HASH_SIZE;
865 spin_lock(&unix_table_lock);
867 if (!sunaddr->sun_path[0]) {
869 if (__unix_find_socket_byname(net, sunaddr, addr_len,
870 sk->sk_type, hash)) {
871 unix_release_addr(addr);
875 list = &unix_socket_table[addr->hash];
877 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
878 u->dentry = nd.path.dentry;
879 u->mnt = nd.path.mnt;
883 __unix_remove_socket(sk);
885 __unix_insert_socket(list, sk);
888 spin_unlock(&unix_table_lock);
890 mutex_unlock(&u->readlock);
897 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
902 unix_release_addr(addr);
906 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
908 if (unlikely(sk1 == sk2) || !sk2) {
909 unix_state_lock(sk1);
913 unix_state_lock(sk1);
914 unix_state_lock_nested(sk2);
916 unix_state_lock(sk2);
917 unix_state_lock_nested(sk1);
921 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
923 if (unlikely(sk1 == sk2) || !sk2) {
924 unix_state_unlock(sk1);
927 unix_state_unlock(sk1);
928 unix_state_unlock(sk2);
931 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
934 struct sock *sk = sock->sk;
935 struct net *net = sock_net(sk);
936 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
941 if (addr->sa_family != AF_UNSPEC) {
942 err = unix_mkname(sunaddr, alen, &hash);
947 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
948 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
952 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
956 unix_state_double_lock(sk, other);
958 /* Apparently VFS overslept socket death. Retry. */
959 if (sock_flag(other, SOCK_DEAD)) {
960 unix_state_double_unlock(sk, other);
966 if (!unix_may_send(sk, other))
969 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
975 * 1003.1g breaking connected state with AF_UNSPEC
978 unix_state_double_lock(sk, other);
982 * If it was connected, reconnect.
985 struct sock *old_peer = unix_peer(sk);
986 unix_peer(sk) = other;
987 unix_state_double_unlock(sk, other);
989 if (other != old_peer)
990 unix_dgram_disconnected(sk, old_peer);
993 unix_peer(sk) = other;
994 unix_state_double_unlock(sk, other);
999 unix_state_double_unlock(sk, other);
1005 static long unix_wait_for_peer(struct sock *other, long timeo)
1007 struct unix_sock *u = unix_sk(other);
1011 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1013 sched = !sock_flag(other, SOCK_DEAD) &&
1014 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1015 unix_recvq_full(other);
1017 unix_state_unlock(other);
1020 timeo = schedule_timeout(timeo);
1022 finish_wait(&u->peer_wait, &wait);
1026 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1027 int addr_len, int flags)
1029 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1030 struct sock *sk = sock->sk;
1031 struct net *net = sock_net(sk);
1032 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1033 struct sock *newsk = NULL;
1034 struct sock *other = NULL;
1035 struct sk_buff *skb = NULL;
1041 err = unix_mkname(sunaddr, addr_len, &hash);
1046 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1047 (err = unix_autobind(sock)) != 0)
1050 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1052 /* First of all allocate resources.
1053 If we will make it after state is locked,
1054 we will have to recheck all again in any case.
1059 /* create new sock for complete connection */
1060 newsk = unix_create1(sock_net(sk), NULL);
1064 /* Allocate skb for sending to listening sock */
1065 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1070 /* Find listening sock. */
1071 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1075 /* Latch state of peer */
1076 unix_state_lock(other);
1078 /* Apparently VFS overslept socket death. Retry. */
1079 if (sock_flag(other, SOCK_DEAD)) {
1080 unix_state_unlock(other);
1085 err = -ECONNREFUSED;
1086 if (other->sk_state != TCP_LISTEN)
1088 if (other->sk_shutdown & RCV_SHUTDOWN)
1091 if (unix_recvq_full(other)) {
1096 timeo = unix_wait_for_peer(other, timeo);
1098 err = sock_intr_errno(timeo);
1099 if (signal_pending(current))
1107 It is a tricky place. We need to grab the write lock and cannot
1108 drop the lock on the peer. It is dangerous because deadlock is
1109 possible. The connect-to-self case and simultaneous
1110 attempts to connect are eliminated by checking socket
1111 state. other is TCP_LISTEN; if sk is TCP_LISTEN we
1112 check this before attempting to grab the lock.
1114 Well, and we have to recheck the state after the socket is locked.
1120 /* This is ok... continue with connect */
1122 case TCP_ESTABLISHED:
1123 /* Socket is already connected */
1131 unix_state_lock_nested(sk);
1133 if (sk->sk_state != st) {
1134 unix_state_unlock(sk);
1135 unix_state_unlock(other);
1140 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1142 unix_state_unlock(sk);
1146 /* The way is open! Quickly set all the necessary fields... */
1149 unix_peer(newsk) = sk;
1150 newsk->sk_state = TCP_ESTABLISHED;
1151 newsk->sk_type = sk->sk_type;
1152 newsk->sk_peercred.pid = task_tgid_vnr(current);
1153 current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
1154 newu = unix_sk(newsk);
1155 newsk->sk_wq = &newu->peer_wq;
1156 otheru = unix_sk(other);
1158 /* copy address information from listening to new sock*/
1160 atomic_inc(&otheru->addr->refcnt);
1161 newu->addr = otheru->addr;
1163 if (otheru->dentry) {
1164 newu->dentry = dget(otheru->dentry);
1165 newu->mnt = mntget(otheru->mnt);
1168 /* Set credentials */
1169 sk->sk_peercred = other->sk_peercred;
1171 sock->state = SS_CONNECTED;
1172 sk->sk_state = TCP_ESTABLISHED;
1175 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1176 unix_peer(sk) = newsk;
1178 unix_state_unlock(sk);
1180 /* take ten and send info to the listening sock */
1181 spin_lock(&other->sk_receive_queue.lock);
1182 __skb_queue_tail(&other->sk_receive_queue, skb);
1183 spin_unlock(&other->sk_receive_queue.lock);
1184 unix_state_unlock(other);
1185 other->sk_data_ready(other, 0);
1191 unix_state_unlock(other);
1196 unix_release_sock(newsk, 0);
1202 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1204 struct sock *ska = socka->sk, *skb = sockb->sk;
1206 /* Join our sockets back to back */
1209 unix_peer(ska) = skb;
1210 unix_peer(skb) = ska;
1211 ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
1212 current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
1213 ska->sk_peercred.uid = skb->sk_peercred.uid;
1214 ska->sk_peercred.gid = skb->sk_peercred.gid;
1216 if (ska->sk_type != SOCK_DGRAM) {
1217 ska->sk_state = TCP_ESTABLISHED;
1218 skb->sk_state = TCP_ESTABLISHED;
1219 socka->state = SS_CONNECTED;
1220 sockb->state = SS_CONNECTED;
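/*
 * Hedged userspace sketch (standard socketpair(2)): both descriptors come
 * back already connected to each other, with the peer credentials set as
 * above.
 *
 *	int sv[2];
 *	char buf[4];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, 4);    // receives "ping"
 *	}
 */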
1225 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1227 struct sock *sk = sock->sk;
1229 struct sk_buff *skb;
1233 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1237 if (sk->sk_state != TCP_LISTEN)
1240 /* If socket state is TCP_LISTEN it cannot change (for now...),
1241 * so that no locks are necessary.
1244 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1246 /* This means receive shutdown. */
1253 skb_free_datagram(sk, skb);
1254 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1256 /* attach accepted sock to socket */
1257 unix_state_lock(tsk);
1258 newsock->state = SS_CONNECTED;
1259 sock_graft(tsk, newsock);
1260 unix_state_unlock(tsk);
1268 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1270 struct sock *sk = sock->sk;
1271 struct unix_sock *u;
1272 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1276 sk = unix_peer_get(sk);
1287 unix_state_lock(sk);
1289 sunaddr->sun_family = AF_UNIX;
1290 sunaddr->sun_path[0] = 0;
1291 *uaddr_len = sizeof(short);
1293 struct unix_address *addr = u->addr;
1295 *uaddr_len = addr->len;
1296 memcpy(sunaddr, addr->name, *uaddr_len);
1298 unix_state_unlock(sk);
1304 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1308 scm->fp = UNIXCB(skb).fp;
1309 UNIXCB(skb).fp = NULL;
1311 for (i = scm->fp->count-1; i >= 0; i--)
1312 unix_notinflight(scm->fp->fp[i]);
1315 static void unix_destruct_scm(struct sk_buff *skb)
1317 struct scm_cookie scm;
1318 memset(&scm, 0, sizeof(scm));
1319 scm.pid = UNIXCB(skb).pid;
1320 scm.cred = UNIXCB(skb).cred;
1322 unix_detach_fds(&scm, skb);
1324 /* Alas, it calls VFS */
1325 /* So fscking what? fput() had been SMP-safe since the last Summer */
1330 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1335 * Need to duplicate file references for the sake of garbage
1336 * collection. Otherwise a socket in the fps might become a
1337 * candidate for GC while the skb is not yet queued.
1339 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1340 if (!UNIXCB(skb).fp)
1343 for (i = scm->fp->count-1; i >= 0; i--)
1344 unix_inflight(scm->fp->fp[i]);
1348 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1351 UNIXCB(skb).pid = get_pid(scm->pid);
1352 UNIXCB(skb).cred = get_cred(scm->cred);
1353 UNIXCB(skb).fp = NULL;
1354 if (scm->fp && send_fds)
1355 err = unix_attach_fds(scm, skb);
1357 skb->destructor = unix_destruct_scm;
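/*
 * Hedged userspace sketch of the sending side that produces the fd-passing
 * control data consumed above (sockfd and fd_to_pass are made-up names;
 * standard <sys/socket.h> cmsg macros):
 *
 *	char dummy = 'x';
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(int))]; } u;
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(sockfd, &msg, 0);
 */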
1362 * Send AF_UNIX data.
1365 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1366 struct msghdr *msg, size_t len)
1368 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1369 struct sock *sk = sock->sk;
1370 struct net *net = sock_net(sk);
1371 struct unix_sock *u = unix_sk(sk);
1372 struct sockaddr_un *sunaddr = msg->msg_name;
1373 struct sock *other = NULL;
1374 int namelen = 0; /* fake GCC */
1377 struct sk_buff *skb;
1379 struct scm_cookie tmp_scm;
1381 if (NULL == siocb->scm)
1382 siocb->scm = &tmp_scm;
1384 err = scm_send(sock, msg, siocb->scm);
1389 if (msg->msg_flags&MSG_OOB)
1392 if (msg->msg_namelen) {
1393 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1400 other = unix_peer_get(sk);
1405 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1406 && (err = unix_autobind(sock)) != 0)
1410 if (len > sk->sk_sndbuf - 32)
1413 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1417 err = unix_scm_to_skb(siocb->scm, skb, true);
1420 unix_get_secdata(siocb->scm, skb);
1422 skb_reset_transport_header(skb);
1423 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1427 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1432 if (sunaddr == NULL)
1435 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1441 unix_state_lock(other);
1443 if (!unix_may_send(sk, other))
1446 if (sock_flag(other, SOCK_DEAD)) {
1448 * Check with 1003.1g - what should
1451 unix_state_unlock(other);
1455 unix_state_lock(sk);
1456 if (unix_peer(sk) == other) {
1457 unix_peer(sk) = NULL;
1458 unix_state_unlock(sk);
1460 unix_dgram_disconnected(sk, other);
1462 err = -ECONNREFUSED;
1464 unix_state_unlock(sk);
1474 if (other->sk_shutdown & RCV_SHUTDOWN)
1477 if (sk->sk_type != SOCK_SEQPACKET) {
1478 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1483 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1489 timeo = unix_wait_for_peer(other, timeo);
1491 err = sock_intr_errno(timeo);
1492 if (signal_pending(current))
1498 skb_queue_tail(&other->sk_receive_queue, skb);
1499 unix_state_unlock(other);
1500 other->sk_data_ready(other, len);
1502 scm_destroy(siocb->scm);
1506 unix_state_unlock(other);
1512 scm_destroy(siocb->scm);
1517 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1518 struct msghdr *msg, size_t len)
1520 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1521 struct sock *sk = sock->sk;
1522 struct sock *other = NULL;
1523 struct sockaddr_un *sunaddr = msg->msg_name;
1525 struct sk_buff *skb;
1527 struct scm_cookie tmp_scm;
1528 bool fds_sent = false;
1530 if (NULL == siocb->scm)
1531 siocb->scm = &tmp_scm;
1533 err = scm_send(sock, msg, siocb->scm);
1538 if (msg->msg_flags&MSG_OOB)
1541 if (msg->msg_namelen) {
1542 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1547 other = unix_peer(sk);
1552 if (sk->sk_shutdown & SEND_SHUTDOWN)
1555 while (sent < len) {
1557 * Optimisation for the fact that under 0.01% of X
1558 * messages typically need breaking up.
1563 /* Keep two messages in the pipe so it schedules better */
1564 if (size > ((sk->sk_sndbuf >> 1) - 64))
1565 size = (sk->sk_sndbuf >> 1) - 64;
1567 if (size > SKB_MAX_ALLOC)
1568 size = SKB_MAX_ALLOC;
1574 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1581 * If you pass two values to the sock_alloc_send_skb
1582 * it tries to grab the large buffer with GFP_NOFS
1583 * (which can fail easily), and if it fails it grabs the
1584 * fallback size buffer which is under a page and will
1585 * succeed. [Alan]
1587 size = min_t(int, size, skb_tailroom(skb));
1590 /* Only send the fds in the first buffer */
1591 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1598 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1604 unix_state_lock(other);
1606 if (sock_flag(other, SOCK_DEAD) ||
1607 (other->sk_shutdown & RCV_SHUTDOWN))
1610 skb_queue_tail(&other->sk_receive_queue, skb);
1611 unix_state_unlock(other);
1612 other->sk_data_ready(other, size);
1616 scm_destroy(siocb->scm);
1622 unix_state_unlock(other);
1625 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1626 send_sig(SIGPIPE, current, 0);
1629 scm_destroy(siocb->scm);
1631 return sent ? : err;
1634 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1635 struct msghdr *msg, size_t len)
1638 struct sock *sk = sock->sk;
1640 err = sock_error(sk);
1644 if (sk->sk_state != TCP_ESTABLISHED)
1647 if (msg->msg_namelen)
1648 msg->msg_namelen = 0;
1650 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1653 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1655 struct unix_sock *u = unix_sk(sk);
1657 msg->msg_namelen = 0;
1659 msg->msg_namelen = u->addr->len;
1660 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1664 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1665 struct msghdr *msg, size_t size,
1668 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1669 struct scm_cookie tmp_scm;
1670 struct sock *sk = sock->sk;
1671 struct unix_sock *u = unix_sk(sk);
1672 int noblock = flags & MSG_DONTWAIT;
1673 struct sk_buff *skb;
1680 msg->msg_namelen = 0;
1682 mutex_lock(&u->readlock);
1684 skb = skb_recv_datagram(sk, flags, noblock, &err);
1686 unix_state_lock(sk);
1687 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1688 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1689 (sk->sk_shutdown & RCV_SHUTDOWN))
1691 unix_state_unlock(sk);
1695 wake_up_interruptible_sync(&u->peer_wait);
1698 unix_copy_addr(msg, skb->sk);
1700 if (size > skb->len)
1702 else if (size < skb->len)
1703 msg->msg_flags |= MSG_TRUNC;
1705 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1710 siocb->scm = &tmp_scm;
1711 memset(&tmp_scm, 0, sizeof(tmp_scm));
1713 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1714 unix_set_secdata(siocb->scm, skb);
1716 if (!(flags & MSG_PEEK)) {
1718 unix_detach_fds(siocb->scm, skb);
1720 /* It is questionable: on PEEK we could:
1721 - do not return fds - good, but too simple 8)
1722 - return fds, and do not return them on read (old strategy,
1723 apparently wrong)
1724 - clone fds (I chose it for now, it is the most universal
1725 solution)
1727 POSIX 1003.1g does not actually define this clearly
1728 at all. POSIX 1003.1g doesn't define a lot of things
1729 clearly, however!
1733 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1737 scm_recv(sock, msg, siocb->scm, flags);
1740 skb_free_datagram(sk, skb);
1742 mutex_unlock(&u->readlock);
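/*
 * Hedged userspace sketch of the receiving side (made-up names): the fds
 * attached by the sender arrive as SCM_RIGHTS ancillary data, and with
 * MSG_PEEK the descriptors are cloned as described above, so a peeking
 * reader gets its own copies.
 *
 *	char byte;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(int))]; } u;
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *	};
 *	struct cmsghdr *cm;
 *	int received_fd = -1;
 *
 *	if (recvmsg(sockfd, &msg, 0) >= 0 &&
 *	    (cm = CMSG_FIRSTHDR(&msg)) && cm->cmsg_type == SCM_RIGHTS)
 *		memcpy(&received_fd, CMSG_DATA(cm), sizeof(int));
 */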
1748 * Sleep until data has arrived. But check for races.
1751 static long unix_stream_data_wait(struct sock *sk, long timeo)
1755 unix_state_lock(sk);
1758 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1760 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1762 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1763 signal_pending(current) ||
1767 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1768 unix_state_unlock(sk);
1769 timeo = schedule_timeout(timeo);
1770 unix_state_lock(sk);
1771 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1774 finish_wait(sk_sleep(sk), &wait);
1775 unix_state_unlock(sk);
1781 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1782 struct msghdr *msg, size_t size,
1785 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1786 struct scm_cookie tmp_scm;
1787 struct sock *sk = sock->sk;
1788 struct unix_sock *u = unix_sk(sk);
1789 struct sockaddr_un *sunaddr = msg->msg_name;
1791 int check_creds = 0;
1797 if (sk->sk_state != TCP_ESTABLISHED)
1804 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1805 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1807 msg->msg_namelen = 0;
1809 /* Lock the socket to prevent queue disordering
1810 * while we sleep in memcpy_tomsg
1814 siocb->scm = &tmp_scm;
1815 memset(&tmp_scm, 0, sizeof(tmp_scm));
1818 mutex_lock(&u->readlock);
1822 struct sk_buff *skb;
1824 unix_state_lock(sk);
1825 skb = skb_dequeue(&sk->sk_receive_queue);
1827 if (copied >= target)
1831 * POSIX 1003.1g mandates this order.
1834 err = sock_error(sk);
1837 if (sk->sk_shutdown & RCV_SHUTDOWN)
1840 unix_state_unlock(sk);
1844 mutex_unlock(&u->readlock);
1846 timeo = unix_stream_data_wait(sk, timeo);
1848 if (signal_pending(current)) {
1849 err = sock_intr_errno(timeo);
1852 mutex_lock(&u->readlock);
1855 unix_state_unlock(sk);
1858 unix_state_unlock(sk);
1861 /* Never glue messages from different writers */
1862 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1863 (UNIXCB(skb).cred != siocb->scm->cred)) {
1864 skb_queue_head(&sk->sk_receive_queue, skb);
1868 /* Copy credentials */
1869 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1873 /* Copy address just once */
1875 unix_copy_addr(msg, skb->sk);
1879 chunk = min_t(unsigned int, skb->len, size);
1880 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1881 skb_queue_head(&sk->sk_receive_queue, skb);
1889 /* Mark read part of skb as used */
1890 if (!(flags & MSG_PEEK)) {
1891 skb_pull(skb, chunk);
1894 unix_detach_fds(siocb->scm, skb);
1896 /* put the skb back if we didn't use it up.. */
1898 skb_queue_head(&sk->sk_receive_queue, skb);
1907 /* It is questionable, see note in unix_dgram_recvmsg.
1910 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1912 /* put message back and return */
1913 skb_queue_head(&sk->sk_receive_queue, skb);
1918 mutex_unlock(&u->readlock);
1919 scm_recv(sock, msg, siocb->scm, flags);
1921 return copied ? : err;
1924 static int unix_shutdown(struct socket *sock, int mode)
1926 struct sock *sk = sock->sk;
1929 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1932 unix_state_lock(sk);
1933 sk->sk_shutdown |= mode;
1934 other = unix_peer(sk);
1937 unix_state_unlock(sk);
1938 sk->sk_state_change(sk);
1941 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1945 if (mode&RCV_SHUTDOWN)
1946 peer_mode |= SEND_SHUTDOWN;
1947 if (mode&SEND_SHUTDOWN)
1948 peer_mode |= RCV_SHUTDOWN;
1949 unix_state_lock(other);
1950 other->sk_shutdown |= peer_mode;
1951 unix_state_unlock(other);
1952 other->sk_state_change(other);
1953 if (peer_mode == SHUTDOWN_MASK)
1954 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1955 else if (peer_mode & RCV_SHUTDOWN)
1956 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
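/*
 * Hedged userspace illustration of the propagation above (fds a and b are
 * a connected stream pair): shutting down the write side of one end shows
 * up as end of file on the other end once its queue drains.
 *
 *	char buf[64];
 *
 *	shutdown(a, SHUT_WR);                    // a: SEND, peer b: RCV
 *	ssize_t n = read(b, buf, sizeof(buf));   // 0 (EOF) when queue is empty
 */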
1964 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1966 struct sock *sk = sock->sk;
1972 amount = sk_wmem_alloc_get(sk);
1973 err = put_user(amount, (int __user *)arg);
1977 struct sk_buff *skb;
1979 if (sk->sk_state == TCP_LISTEN) {
1984 spin_lock(&sk->sk_receive_queue.lock);
1985 if (sk->sk_type == SOCK_STREAM ||
1986 sk->sk_type == SOCK_SEQPACKET) {
1987 skb_queue_walk(&sk->sk_receive_queue, skb)
1990 skb = skb_peek(&sk->sk_receive_queue);
1994 spin_unlock(&sk->sk_receive_queue.lock);
1995 err = put_user(amount, (int __user *)arg);
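/*
 * Hedged userspace sketch of the two queries handled above (assumes
 * SIOCINQ/SIOCOUTQ from <linux/sockios.h>; variable names made up):
 *
 *	int inq = 0, outq = 0;
 *
 *	ioctl(fd, SIOCINQ, &inq);    // bytes waiting in the receive queue
 *	ioctl(fd, SIOCOUTQ, &outq);  // bytes sent but not yet consumed by peer
 */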
2006 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2008 struct sock *sk = sock->sk;
2011 sock_poll_wait(file, sk_sleep(sk), wait);
2014 /* exceptional events? */
2017 if (sk->sk_shutdown == SHUTDOWN_MASK)
2019 if (sk->sk_shutdown & RCV_SHUTDOWN)
2023 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2024 (sk->sk_shutdown & RCV_SHUTDOWN))
2025 mask |= POLLIN | POLLRDNORM;
2027 /* Connection-based sockets need to check for termination and startup */
2028 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2029 sk->sk_state == TCP_CLOSE)
2033 * we set writable also when the other side has shut down the
2034 * connection. This prevents stuck sockets.
2036 if (unix_writable(sk))
2037 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2042 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2045 struct sock *sk = sock->sk, *other;
2046 unsigned int mask, writable;
2048 sock_poll_wait(file, sk_sleep(sk), wait);
2051 /* exceptional events? */
2052 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2054 if (sk->sk_shutdown & RCV_SHUTDOWN)
2056 if (sk->sk_shutdown == SHUTDOWN_MASK)
2060 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2061 (sk->sk_shutdown & RCV_SHUTDOWN))
2062 mask |= POLLIN | POLLRDNORM;
2064 /* Connection-based sockets need to check for termination and startup */
2065 if (sk->sk_type == SOCK_SEQPACKET) {
2066 if (sk->sk_state == TCP_CLOSE)
2068 /* connection hasn't started yet? */
2069 if (sk->sk_state == TCP_SYN_SENT)
2074 writable = unix_writable(sk);
2076 other = unix_peer_get(sk);
2078 if (unix_peer(other) != sk) {
2079 sock_poll_wait(file, &unix_sk(other)->peer_wait,
2081 if (unix_recvq_full(other))
2090 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2092 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2097 #ifdef CONFIG_PROC_FS
2098 static struct sock *first_unix_socket(int *i)
2100 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2101 if (!hlist_empty(&unix_socket_table[*i]))
2102 return __sk_head(&unix_socket_table[*i]);
2107 static struct sock *next_unix_socket(int *i, struct sock *s)
2109 struct sock *next = sk_next(s);
2110 /* More in this chain? */
2113 /* Look for next non-empty chain. */
2114 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2115 if (!hlist_empty(&unix_socket_table[*i]))
2116 return __sk_head(&unix_socket_table[*i]);
2121 struct unix_iter_state {
2122 struct seq_net_private p;
2126 static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2128 struct unix_iter_state *iter = seq->private;
2132 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2133 if (sock_net(s) != seq_file_net(seq))
2142 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2143 __acquires(unix_table_lock)
2145 spin_lock(&unix_table_lock);
2146 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2149 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2151 struct unix_iter_state *iter = seq->private;
2152 struct sock *sk = v;
2155 if (v == SEQ_START_TOKEN)
2156 sk = first_unix_socket(&iter->i);
2158 sk = next_unix_socket(&iter->i, sk);
2159 while (sk && (sock_net(sk) != seq_file_net(seq)))
2160 sk = next_unix_socket(&iter->i, sk);
2164 static void unix_seq_stop(struct seq_file *seq, void *v)
2165 __releases(unix_table_lock)
2167 spin_unlock(&unix_table_lock);
2170 static int unix_seq_show(struct seq_file *seq, void *v)
2173 if (v == SEQ_START_TOKEN)
2174 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2178 struct unix_sock *u = unix_sk(s);
2181 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2183 atomic_read(&s->sk_refcnt),
2185 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2188 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2189 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2197 len = u->addr->len - sizeof(short);
2198 if (!UNIX_ABSTRACT(s))
2204 for ( ; i < len; i++)
2205 seq_putc(seq, u->addr->name->sun_path[i]);
2207 unix_state_unlock(s);
2208 seq_putc(seq, '\n');
2214 static const struct seq_operations unix_seq_ops = {
2215 .start = unix_seq_start,
2216 .next = unix_seq_next,
2217 .stop = unix_seq_stop,
2218 .show = unix_seq_show,
2221 static int unix_seq_open(struct inode *inode, struct file *file)
2223 return seq_open_net(inode, file, &unix_seq_ops,
2224 sizeof(struct unix_iter_state));
2227 static const struct file_operations unix_seq_fops = {
2228 .owner = THIS_MODULE,
2229 .open = unix_seq_open,
2231 .llseek = seq_lseek,
2232 .release = seq_release_net,
2237 static const struct net_proto_family unix_family_ops = {
2239 .create = unix_create,
2240 .owner = THIS_MODULE,
2244 static int __net_init unix_net_init(struct net *net)
2246 int error = -ENOMEM;
2248 net->unx.sysctl_max_dgram_qlen = 10;
2249 if (unix_sysctl_register(net))
2252 #ifdef CONFIG_PROC_FS
2253 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2254 unix_sysctl_unregister(net);
2263 static void __net_exit unix_net_exit(struct net *net)
2265 unix_sysctl_unregister(net);
2266 proc_net_remove(net, "unix");
2269 static struct pernet_operations unix_net_ops = {
2270 .init = unix_net_init,
2271 .exit = unix_net_exit,
2274 static int __init af_unix_init(void)
2277 struct sk_buff *dummy_skb;
2279 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2281 rc = proto_register(&unix_proto, 1);
2283 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2288 sock_register(&unix_family_ops);
2289 register_pernet_subsys(&unix_net_ops);
2294 static void __exit af_unix_exit(void)
2296 sock_unregister(PF_UNIX);
2297 proto_unregister(&unix_proto);
2298 unregister_pernet_subsys(&unix_net_ops);
2301 /* Earlier than device_initcall() so that other drivers invoking
2302 request_module() don't end up in a loop when modprobe tries
2303 to use a UNIX socket. But later than subsys_initcall() because
2304 we depend on stuff initialised there */
2305 fs_initcall(af_unix_init);
2306 module_exit(af_unix_exit);
2308 MODULE_LICENSE("GPL");
2309 MODULE_ALIAS_NETPROTO(PF_UNIX);