/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				 const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk variants. It also finally
 *	fixes the long standing peek and read race for datagram sockets. If
 *	you alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram).
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;
		int _off = *off;

		last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			last = skb;
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (sk_can_busy_loop(sk) &&
		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
			continue;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_more_packets(sk, err, &timeo, last));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

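/*
 * Typical peeking caller, sketched (illustrative only; msg_flags stands
 * in for the caller's recvmsg flags): a socket that supports SO_PEEK_OFF,
 * such as UDP, feeds its stored peek offset in through @off and reads
 * back where the unread data starts:
 *
 *	int peeked, err, off = sk_peek_offset(sk, msg_flags);
 *	struct sk_buff *skb;
 *
 *	skb = __skb_recv_datagram(sk, msg_flags, &peeked, &off, &err);
 *	if (!skb)
 *		return err;
 *	// data for this caller begins 'off' bytes into 'skb'
 */
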
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);

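/*
 * Minimal recvmsg() for a datagram protocol, sketched from the helpers
 * above (my_proto_recvmsg and the zero payload offset are illustrative
 * assumptions, not code from this file):
 *
 *	static int my_proto_recvmsg(struct kiocb *iocb, struct socket *sock,
 *				    struct msghdr *msg, size_t len, int flags)
 *	{
 *		struct sock *sk = sock->sk;
 *		struct sk_buff *skb;
 *		size_t copied;
 *		int err;
 *
 *		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;		// short read, tell the user
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */
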
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);

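/*
 * Sketch of the intended use, in the style of udp_recvmsg()'s
 * csum_copy_err path: when corruption is discovered while copying a
 * peeked packet, kill the datagram so a MSG_PEEK caller cannot see the
 * bad packet again, then retry (try_again and the stats update are the
 * caller's own):
 *
 *	csum_copy_err:
 *		if (!skb_kill_datagram(sk, skb, flags))
 *			UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS,
 *					   is_udplite);
 *		goto try_again;
 */
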
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);

/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);

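/*
 * Sketch of a transmit-side user (compare packet_snd() in af_packet.c):
 * allocate an skb, put the payload area, then fill it from the user's
 * iovec.  Allocation sizing and error unwinding are elided:
 *
 *	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
 *				  &err);
 *	if (!skb)
 *		return err;
 *	skb_put(skb, len);
 *	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
 *	if (err)
 *		goto out_free;
 */
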
/**
 *	zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
 *	@skb: buffer to copy
 *	@from: io vector to copy from
 *	@offset: offset in the io vector to start copying from
 *	@count: number of io vector entries to copy to buffer from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 *	Note: the iovec is not modified during the copy
 */
int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
			   int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = skb_headlen(skb);
	int size, offset1 = 0;
	int i = 0;

	/* Skip over from offset */
	while (count && (offset >= from->iov_len)) {
		offset -= from->iov_len;
		++from;
		--count;
	}

	/* copy up to skb headlen */
	while (count && (copy > 0)) {
		size = min_t(unsigned int, copy, from->iov_len - offset);
		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
				   size))
			return -EFAULT;
		if (copy > size) {
			++from;
			--count;
			offset = 0;
		} else
			offset += size;
		copy -= size;
		offset1 += size;
	}

	if (len == offset1)
		return 0;

	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		len = from->iov_len - offset;
		if (!len) {
			offset = 0;
			++from;
			continue;
		}
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			int j;

			for (j = 0; j < num_pages; j++)
				put_page(page[i + j]);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			__skb_fill_page_desc(skb, i, page[i], off, size);
			skb_shinfo(skb)->nr_frags++;
			/* increase sk_wmem_alloc */
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iovec);

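/*
 * Usage note, sketched from the macvtap/tun transmit paths this helper
 * serves: the skb must already be owned by a socket (skb->sk set),
 * because the pinned pages are charged to sk_wmem_alloc, and the caller
 * falls back to a plain copy when zerocopy is not possible:
 *
 *	if (zerocopy)
 *		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
 *	else
 *		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
 *						   len);
 */
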
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete_head(skb, skb->len);
}
EXPORT_SYMBOL(__skb_checksum_complete);

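/*
 * Sketch of the usual validation step before a plain (non-checksumming)
 * copy; skb_checksum_complete() in skbuff.h wraps exactly this pattern:
 *
 *	if (!skb_csum_unnecessary(skb) && __skb_checksum_complete(skb))
 *		goto csum_error;	// bad checksum, drop the packet
 */
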
/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);

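/*
 * Sketch of the calling pattern, following udp_recvmsg(): plain copy when
 * hardware already validated the checksum, checksum-while-copying
 * otherwise.  A partial read must verify the whole checksum first, which
 * is why the caller must ensure the skb fits the iovec before using this
 * helper (copied and csum_copy_err are the caller's own):
 *
 *	if (skb_csum_unnecessary(skb))
 *		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *					      msg->msg_iov, copied);
 *	else {
 *		err = skb_copy_and_csum_datagram_iovec(skb,
 *						       sizeof(struct udphdr),
 *						       msg->msg_iov);
 *		if (err == -EINVAL)
 *			goto csum_copy_err;
 *	}
 */
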
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
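
/*
 * Wiring it up, sketched: connectionless protocols typically point their
 * proto_ops at datagram_poll directly, as AppleTalk does with
 * atalk_dgram_ops (my_dgram_ops and PF_MYPROTO are made-up names; other
 * members elided):
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_MYPROTO,
 *		.poll		= datagram_poll,
 *		...
 *	};
 */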