/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              PACKET - implements raw packet sockets.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() now used correctly
 *              Alan Cox        :       new skbuff lists, look ma no backlogs!
 *              Alan Cox        :       tidied skbuff lists.
 *              Alan Cox        :       Now uses generic datagram routines I
 *                                      added. Also fixed the peek/read crash
 *                                      from all old Linux datagram code.
 *              Alan Cox        :       Uses the improved datagram code.
 *              Alan Cox        :       Added NULL's for socket options.
 *              Alan Cox        :       Re-commented the code.
 *              Alan Cox        :       Use new kernel side addressing
 *              Rob Janssen     :       Correct MTU usage.
 *              Dave Platt      :       Counter leaks caused by incorrect
 *                                      interrupt locking and some slightly
 *                                      dubious gcc output. Can you read
 *                                      compiler: it said _VOLATILE_
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       New buffers. Use sk->mac.raw.
 *              Alan Cox        :       sendmsg/recvmsg support.
 *              Alan Cox        :       Protocol setting support
 *      Alexey Kuznetsov        :       Untied from IPv4 stack.
 *      Cyrus Durgin            :       Fixed kerneld for kmod.
 *      Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
 *                                      The convention is that longer addresses
 *                                      will simply extend the hardware address
 *                                      byte arrays at the end of sockaddr_ll
 *                                      and packet_mreq.
 *              Johann Baudy    :       Added TX RING.
 *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
 *                                      layer.
 *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - if a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels should still reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnels); others are silly
     (PPP).
   - a packet socket receives packets with the ll header pulled, so
     SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
                 header.  PPP does this, which is wrong, because it introduces
                 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

In summary:
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
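/* For example: on an incoming Ethernet frame (dev->hard_header != NULL),
 * mac_header points at the 14-byte Ethernet header and data points just
 * past it, so a SOCK_RAW packet socket pushes the header back before
 * handing the frame to user space.
 */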

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
        int             mr_ifindex;
        unsigned short  mr_type;
        unsigned short  mr_alen;
        unsigned char   mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
        struct tpacket_hdr  *h1;
        struct tpacket2_hdr *h2;
        struct tpacket3_hdr *h3;
        void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                int closing, int tx_ring);

#define V3_ALIGNMENT    (8)

#define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

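/* Accessors for the TPACKET_V3 block descriptor header fields (status,
 * packet count, offsets, sequence number) and for the per-block private
 * area that follows the descriptor.
 */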
#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
#define BLOCK_PRIV(x)           ((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
                        struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
                        struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
                struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
                struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
                struct tpacket_kbdq_core *,
                void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
        union {
                struct sockaddr_pkt pkt;
                union {
                        /* Trick: alias skb original length with
                         * ll.sll_family and ll.protocol in order
                         * to save room.
                         */
                        unsigned int origlen;
                        struct sockaddr_ll ll;
                };
        } sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid) \
        ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
        ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

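/* Transmit an skb directly through the driver (the PACKET_QDISC_BYPASS
 * path), skipping the qdisc layer: validate/segment the skb, take the
 * tx-queue lock and call the driver's xmit routine. The frame is dropped
 * if the device is down, has no carrier, or the queue is stopped/frozen.
 */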
static int packet_direct_xmit(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct sk_buff *orig_skb = skb;
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;

        if (unlikely(!netif_running(dev) ||
                     !netif_carrier_ok(dev)))
                goto drop;

        skb = validate_xmit_skb_list(skb, dev);
        if (skb != orig_skb)
                goto drop;

        txq = skb_get_tx_queue(dev, skb);

        local_bh_disable();

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_drv_stopped(txq))
                ret = netdev_start_xmit(skb, dev, txq, false);
        HARD_TX_UNLOCK(dev, txq);

        local_bh_enable();

        if (!dev_xmit_complete(ret))
                kfree_skb(skb);

        return ret;
drop:
        atomic_long_inc(&dev->tx_dropped);
        kfree_skb_list(skb);
        return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(po->cached_dev);
        if (likely(dev))
                dev_hold(dev);
        rcu_read_unlock();

        return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
                                     struct net_device *dev)
{
        rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
        RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
        return po->xmit == packet_direct_xmit;
}

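/* Tx-queue selection for the direct-xmit path: by default map the
 * current CPU onto the device's real tx-queue range. Drivers providing
 * ndo_select_queue() get first say, capped by netdev_cap_txqueue().
 */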
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
        return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        u16 queue_index;

        if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb, NULL,
                                                    __packet_pick_tx_queue);
                queue_index = netdev_cap_txqueue(dev, queue_index);
        } else {
                queue_index = __packet_pick_tx_queue(dev, skb);
        }

        skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
        struct packet_sock *po = pkt_sk(sk);

        if (!po->running) {
                if (po->fanout)
                        __fanout_link(sk, po);
                else
                        dev_add_pack(&po->prot_hook);

                sock_hold(sk);
                po->running = 1;
        }
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net() to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        po->running = 0;

        if (po->fanout)
                __fanout_unlink(sk, po);
        else
                __dev_remove_pack(&po->prot_hook);

        __sock_put(sk);

        if (sync) {
                spin_unlock(&po->bind_lock);
                synchronize_net();
                spin_lock(&po->bind_lock);
        }
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        if (po->running)
                __unregister_prot_hook(sk, sync);
}

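/* Ring buffers (pg_vec) may come from the direct mapping or from
 * vmalloc (PGV_FROM_VMALLOC); resolve either kind of address to its
 * struct page so flush_dcache_page() can be applied to it.
 */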
static inline struct page * __pure pgv_to_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        return virt_to_page(addr);
}

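/* tp_status is the handshake word between the kernel and the user-space
 * mmap()ed ring: the kernel hands a frame over by setting TP_STATUS_USER,
 * user space returns it with TP_STATUS_KERNEL. The smp_wmb()/smp_rmb()
 * pairs order the status update against the frame contents, and
 * flush_dcache_page() keeps the mmap'ed view coherent on architectures
 * with aliasing D-caches.
 */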
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
        union tpacket_uhdr h;

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                break;
        case TPACKET_V2:
                h.h2->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                break;
        case TPACKET_V3:
                h.h3->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
        union tpacket_uhdr h;

        smp_rmb();

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                return h.h1->tp_status;
        case TPACKET_V2:
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                return h.h2->tp_status;
        case TPACKET_V3:
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                return h.h3->tp_status;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return 0;
        }
}

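/* Pick the timestamp to report for a frame: a raw hardware timestamp if
 * the socket asked for one (SOF_TIMESTAMPING_RAW_HARDWARE) and the skb
 * carries it, otherwise the software skb->tstamp. Returns the matching
 * TP_STATUS_TS_* flag, or 0 if no timestamp is available.
 */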
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
                                   unsigned int flags)
{
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

        if (shhwtstamps &&
            (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
            ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
                return TP_STATUS_TS_RAW_HARDWARE;

        if (ktime_to_timespec_cond(skb->tstamp, ts))
                return TP_STATUS_TS_SOFTWARE;

        return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
                                    struct sk_buff *skb)
{
        union tpacket_uhdr h;
        struct timespec ts;
        __u32 ts_status;

        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
                return 0;

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_sec = ts.tv_sec;
                h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
                break;
        case TPACKET_V2:
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
                break;
        case TPACKET_V3:
                h.h3->tp_sec = ts.tv_sec;
                h.h3->tp_nsec = ts.tv_nsec;
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        /* one flush is safe, as both fields always lie on the same cacheline */
        flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
        smp_wmb();

        return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                unsigned int position,
                int status)
{
        unsigned int pg_vec_pos, frame_offset;
        union tpacket_uhdr h;

        pg_vec_pos = position / rb->frames_per_block;
        frame_offset = position % rb->frames_per_block;

        h.raw = rb->pg_vec[pg_vec_pos].buffer +
                (frame_offset * rb->frame_size);

        if (status != __packet_get_status(po, h.raw))
                return NULL;

        return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
                struct sk_buff_head *rb_queue)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

        spin_lock_bh(&rb_queue->lock);
        pkc->delete_blk_timer = 1;
        spin_unlock_bh(&rb_queue->lock);

        prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
                struct tpacket_kbdq_core *pkc,
                void (*func) (unsigned long))
{
        init_timer(&pkc->retire_blk_timer);
        pkc->retire_blk_timer.data = (long)po;
        pkc->retire_blk_timer.function = func;
        pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

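/* Derive the block-retire timeout from the link speed and block size.
 * Worked example: a 4 MiB block on a 10 Gb/s link gives
 * mbits = (4 * 1024 * 1024 * 8) / (1024 * 1024) = 32 and div = 10, so
 * tmo = 32/10 * 1 msec, returned as 3 + 1 = 4 msec. Links slower than
 * 1 Gb/s (or of unknown speed) just get DEFAULT_PRB_RETIRE_TOV.
 */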
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
                                int blk_size_in_bytes)
{
        struct net_device *dev;
        unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
        struct ethtool_link_ksettings ecmd;
        int err;

        rtnl_lock();
        dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
        if (unlikely(!dev)) {
                rtnl_unlock();
                return DEFAULT_PRB_RETIRE_TOV;
        }
        err = __ethtool_get_link_ksettings(dev, &ecmd);
        rtnl_unlock();
        if (!err) {
                /*
                 * If the link speed is that slow, you don't really
                 * need to worry about perf anyway.
                 */
                if (ecmd.base.speed < SPEED_1000 ||
                    ecmd.base.speed == SPEED_UNKNOWN) {
                        return DEFAULT_PRB_RETIRE_TOV;
                } else {
                        msec = 1;
                        div = ecmd.base.speed / 1000;
                }
        }

        mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

        if (div)
                mbits /= div;

        tmo = mbits * msec;

        if (div)
                return tmo+1;
        return tmo;
}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
                        union tpacket_req_u *req_u)
{
        p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
                        struct packet_ring_buffer *rb,
                        struct pgv *pg_vec,
                        union tpacket_req_u *req_u)
{
        struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd;

        memset(p1, 0x0, sizeof(*p1));

        p1->knxt_seq_num = 1;
        p1->pkbdq = pg_vec;
        pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
        p1->pkblk_start = pg_vec[0].buffer;
        p1->kblk_size = req_u->req3.tp_block_size;
        p1->knum_blocks = req_u->req3.tp_block_nr;
        p1->hdrlen = po->tp_hdrlen;
        p1->version = po->tp_version;
        p1->last_kactive_blk_num = 0;
        po->stats.stats3.tp_freeze_q_cnt = 0;
        if (req_u->req3.tp_retire_blk_tov)
                p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
        else
                p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
                                                req_u->req3.tp_block_size);
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

        p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po);
        prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        mod_timer(&pkc->retire_blk_timer,
                        jiffies + pkc->tov_in_jiffies);
        pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
        struct packet_sock *po = (struct packet_sock *)data;
        struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        unsigned int frozen;
        struct tpacket_block_desc *pbd;

        spin_lock(&po->sk.sk_receive_queue.lock);

        frozen = prb_queue_frozen(pkc);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        if (unlikely(pkc->delete_blk_timer))
                goto out;

        /* We only need to plug the race when the block is partially filled.
         * tpacket_rcv:
         *              lock(); increment BLOCK_NUM_PKTS; unlock()
         *              copy_bits() is in progress ...
         *              timer fires on other cpu:
         *              we can't retire the current block because copy_bits
         *              is in progress.
         *
         */
        if (BLOCK_NUM_PKTS(pbd)) {
                while (atomic_read(&pkc->blk_fill_in_prog)) {
                        /* Waiting for skb_copy_bits to finish... */
                        cpu_relax();
                }
        }

        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
                if (!frozen) {
                        if (!BLOCK_NUM_PKTS(pbd)) {
                                /* An empty block. Just refresh the timer. */
                                goto refresh_timer;
                        }
                        prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
                        if (!prb_dispatch_next_block(pkc, po))
                                goto refresh_timer;
                        else
                                goto out;
                } else {
                        /* Case 1. Queue was frozen because user-space was
                         *         lagging behind.
                         */
                        if (prb_curr_blk_in_use(pkc, pbd)) {
                                /*
                                 * Ok, user-space is still behind.
                                 * So just refresh the timer.
                                 */
                                goto refresh_timer;
                        } else {
                               /* Case 2. The queue was frozen, user-space
                                * caught up, now the link went idle and the
                                * timer fired. We don't have a block to close,
                                * so we open this block and restart the timer.
                                * Opening a block thaws the queue and restarts
                                * the timer; thawing/timer-refresh is a side
                                * effect.
                                */
                                prb_open_block(pkc, pbd);
                                goto out;
                        }
                }
        }

refresh_timer:
        _prb_refresh_rx_retire_blk_timer(pkc);

out:
        spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1, __u32 status)
{
        /* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        u8 *start, *end;

        start = (u8 *)pbd1;

        /* Skip the block header (we know the header WILL fit in 4K) */
        start += PAGE_SIZE;

        end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
        for (; start < end; start += PAGE_SIZE)
                flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif

        /* Now update the block status. */

        BLOCK_STATUS(pbd1) = status;

        /* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        start = (u8 *)pbd1;
        flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *      because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1,
                struct packet_sock *po, unsigned int stat)
{
        __u32 status = TP_STATUS_USER | stat;

        struct tpacket3_hdr *last_pkt;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
        struct sock *sk = &po->sk;

        if (po->stats.stats3.tp_drops)
                status |= TP_STATUS_LOSING;

        last_pkt = (struct tpacket3_hdr *)pkc1->prev;
        last_pkt->tp_next_offset = 0;

        /* Get the ts of the last pkt */
        if (BLOCK_NUM_PKTS(pbd1)) {
                h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
                h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
        } else {
                /* Ok, we tmo'd - so get the current time.
                 *
                 * It shouldn't really happen as we don't close empty
                 * blocks. See prb_retire_rx_blk_timer_expired().
                 */
                struct timespec ts;
                getnstimeofday(&ts);
                h1->ts_last_pkt.ts_sec = ts.tv_sec;
                h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
        }

        smp_wmb();

        /* Flush the block */
        prb_flush_block(pkc1, pbd1, status);

        sk->sk_data_ready(sk);

        pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
        pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
        struct tpacket_block_desc *pbd1)
{
        struct timespec ts;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

        smp_rmb();

        /* We could have just memset this but we would lose the
         * flexibility of making the priv area sticky
         */

        BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
        BLOCK_NUM_PKTS(pbd1) = 0;
        BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        getnstimeofday(&ts);

        h1->ts_first_pkt.ts_sec = ts.tv_sec;
        h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

        pkc1->pkblk_start = (char *)pbd1;
        pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
        BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

        pbd1->version = pkc1->version;
        pkc1->prev = pkc1->nxt_offset;
        pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

        prb_thaw_queue(pkc1);
        _prb_refresh_rx_retire_blk_timer(pkc1);

        smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens the Rx ring.
 * 3) Some time past 't0', the kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) The link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) The link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
                                  struct packet_sock *po)
{
        pkc->reset_pending_on_curr_blk = 1;
        po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po)
{
        struct tpacket_block_desc *pbd;

        smp_rmb();

        /* 1. Get current block num */
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* 2. If this block is currently in_use then freeze the queue */
        if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
                prb_freeze_queue(pkc, po);
                return NULL;
        }

        /*
         * 3.
         * open this block and return the offset where the first packet
         * needs to get stored.
         */
        prb_open_block(pkc, pbd);
        return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po, unsigned int status)
{
        struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* retire/close the current block */
        if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
                /*
                 * Plug the case where copy_bits() is in progress on
                 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
                 * have space to copy the pkt in the current block and
                 * called prb_retire_current_block()
                 *
                 * We don't need to worry about the TMO case because
                 * the timer-handler already handled this case.
                 */
                if (!(status & TP_STATUS_BLK_TMO)) {
                        while (atomic_read(&pkc->blk_fill_in_prog)) {
                                /* Waiting for skb_copy_bits to finish... */
                                cpu_relax();
                        }
                }
                prb_close_block(pkc, pbd, po, status);
                return;
        }
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
                                      struct tpacket_block_desc *pbd)
{
        return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
        return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
        atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        if (skb_vlan_tag_present(pkc->skb)) {
                ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
                ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
                ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
        } else {
                ppd->hv1.tp_vlan_tci = 0;
                ppd->hv1.tp_vlan_tpid = 0;
                ppd->tp_status = TP_STATUS_AVAILABLE;
        }
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_padding = 0;
        prb_fill_vlan_info(pkc, ppd);

        if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
                prb_fill_rxhash(pkc, ppd);
        else
                prb_clear_rxhash(pkc, ppd);
}

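/* Reserve 'len' (aligned) bytes at 'curr' for the incoming packet:
 * chain it via tp_next_offset, advance nxt_offset, bump the block's
 * length and packet count, and raise blk_fill_in_prog so the retire
 * timer waits until skb_copy_bits() has finished filling the slot.
 */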
static void prb_fill_curr_block(char *curr,
                                struct tpacket_kbdq_core *pkc,
                                struct tpacket_block_desc *pbd,
                                unsigned int len)
{
        struct tpacket3_hdr *ppd;

        ppd  = (struct tpacket3_hdr *)curr;
        ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
        pkc->prev = curr;
        pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_NUM_PKTS(pbd) += 1;
        atomic_inc(&pkc->blk_fill_in_prog);
        prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            int status,
                                            unsigned int len)
{
        struct tpacket_kbdq_core *pkc;
        struct tpacket_block_desc *pbd;
        char *curr, *end;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* Queue is frozen when user space is lagging behind */
        if (prb_queue_frozen(pkc)) {
                /*
                 * Check if the last block, which caused the queue to
                 * freeze, is still in use by user-space.
                 */
                if (prb_curr_blk_in_use(pkc, pbd)) {
                        /* Can't record this packet */
                        return NULL;
                } else {
                        /*
                         * Ok, the block was released by user-space.
                         * Now let's open that block.
                         * Opening a block also thaws the queue.
                         * Thawing is a side effect.
                         */
                        prb_open_block(pkc, pbd);
                }
        }

        smp_mb();
        curr = pkc->nxt_offset;
        pkc->skb = skb;
        end = (char *)pbd + pkc->kblk_size;

        /* first try the current block */
        if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /* Ok, close the current block */
        prb_retire_current_block(pkc, po, 0);

        /* Now, try to dispatch the next block */
        curr = (char *)prb_dispatch_next_block(pkc, po);
        if (curr) {
                pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /*
         * No free blocks are available. user-space hasn't caught up yet.
         * The queue was just frozen and now this packet will get dropped.
         */
        return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            int status, unsigned int len)
{
        char *curr = NULL;
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                curr = packet_lookup_frame(po, &po->rx_ring,
                                        po->rx_ring.head, status);
                return curr;
        case TPACKET_V3:
                return __packet_lookup_frame_in_block(po, skb, status, len);
        default:
                WARN(1, "TPACKET version not supported\n");
                BUG();
                return NULL;
        }
}

static void *prb_lookup_block(struct packet_sock *po,
                                     struct packet_ring_buffer *rb,
                                     unsigned int idx,
                                     int status)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

        if (status != BLOCK_STATUS(pbd))
                return NULL;
        return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
        unsigned int prev;
        if (rb->prb_bdqc.kactive_blk_num)
                prev = rb->prb_bdqc.kactive_blk_num-1;
        else
                prev = rb->prb_bdqc.knum_blocks-1;
        return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
                                         struct packet_ring_buffer *rb,
                                         int status)
{
        unsigned int previous = prb_previous_blk_num(rb);
        return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
                                             struct packet_ring_buffer *rb,
                                             int status)
{
        if (po->tp_version <= TPACKET_V2)
                return packet_previous_frame(po, rb, status);

        return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
                                            struct packet_ring_buffer *rb)
{
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                return packet_increment_head(rb);
        case TPACKET_V3:
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return;
        }
}

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
        return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
        buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
        this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
        this_cpu_dec(*rb->pending_refcnt);
}

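/* Sum the per-cpu pending counter. Producers only touch their local
 * counter (no atomics), so the total is only approximate while
 * transmits are in flight. Only the tx ring uses this, to track frames
 * handed to the driver but not yet completed.
 */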
static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
        unsigned int refcnt = 0;
        int cpu;

        /* We don't use pending refcount in rx_ring. */
        if (rb->pending_refcnt == NULL)
                return 0;

        for_each_possible_cpu(cpu)
                refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

        return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
        po->rx_ring.pending_refcnt = NULL;

        po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
        if (unlikely(po->tx_ring.pending_refcnt == NULL))
                return -ENOBUFS;

        return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
        free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF    2
#define ROOM_NONE       0x0
#define ROOM_LOW        0x1
#define ROOM_NORMAL     0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = po->rx_ring.frame_max + 1;
        idx = po->rx_ring.head;
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = po->rx_ring.prb_bdqc.knum_blocks;
        idx = po->rx_ring.prb_bdqc.kactive_blk_num;
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

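/* Classify receive-side room, mainly for fanout decisions. Ring sockets
 * probe the slot (len >> ROOM_POW_OFF, i.e. a quarter of the ring) ahead
 * of head: if that frame/block is still TP_STATUS_KERNEL there is
 * ROOM_NORMAL; if only the very next slot is free there is ROOM_LOW.
 * Non-ring sockets use plain sk_rcvbuf accounting instead.
 */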
static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
        struct sock *sk = &po->sk;
        int ret = ROOM_NONE;

        if (po->prot_hook.func != tpacket_rcv) {
                int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
                                          - (skb ? skb->truesize : 0);
                if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
                        return ROOM_NORMAL;
                else if (avail > 0)
                        return ROOM_LOW;
                else
                        return ROOM_NONE;
        }

        if (po->tp_version == TPACKET_V3) {
                if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_v3_has_room(po, 0))
                        ret = ROOM_LOW;
        } else {
                if (__tpacket_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_has_room(po, 0))
                        ret = ROOM_LOW;
        }

        return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
        int ret;
        bool has_room;

        spin_lock_bh(&po->sk.sk_receive_queue.lock);
        ret = __packet_rcv_has_room(po, skb);
        has_room = ret == ROOM_NORMAL;
        if (po->pressure == has_room)
                po->pressure = !has_room;
        spin_unlock_bh(&po->sk.sk_receive_queue.lock);

        return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_error_queue);

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive packet socket: %p\n", sk);
                return;
        }

        sk_refcnt_debug_dec(sk);
}

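/* Heuristic used by rollover: keep a small history of recent rxhashes;
 * a flow counts as "huge" if its hash already occupies more than half
 * of the history slots, in which case ROOM_LOW on its own socket is not
 * enough to avoid rolling over.
 */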
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
        u32 rxhash;
        int i, count = 0;

        rxhash = skb_get_hash(skb);
        for (i = 0; i < ROLLOVER_HLEN; i++)
                if (po->rollover->history[i] == rxhash)
                        count++;

        po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
        return count > (ROLLOVER_HLEN >> 1);
}

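/* The fanout_demux_*() helpers map an skb to a member index in [0, num)
 * according to the PACKET_FANOUT_* mode: flow hash, round-robin,
 * receiving CPU, random, queue mapping, or a BPF program's verdict.
 */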
1347 static unsigned int fanout_demux_hash(struct packet_fanout *f,
1348                                       struct sk_buff *skb,
1349                                       unsigned int num)
1350 {
1351         return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1352 }
1353
1354 static unsigned int fanout_demux_lb(struct packet_fanout *f,
1355                                     struct sk_buff *skb,
1356                                     unsigned int num)
1357 {
1358         unsigned int val = atomic_inc_return(&f->rr_cur);
1359
1360         return val % num;
1361 }
1362
1363 static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1364                                      struct sk_buff *skb,
1365                                      unsigned int num)
1366 {
1367         return smp_processor_id() % num;
1368 }
1369
1370 static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1371                                      struct sk_buff *skb,
1372                                      unsigned int num)
1373 {
1374         return prandom_u32_max(num);
1375 }
1376
1377 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1378                                           struct sk_buff *skb,
1379                                           unsigned int idx, bool try_self,
1380                                           unsigned int num)
1381 {
1382         struct packet_sock *po, *po_next, *po_skip = NULL;
1383         unsigned int i, j, room = ROOM_NONE;
1384
1385         po = pkt_sk(f->arr[idx]);
1386
1387         if (try_self) {
1388                 room = packet_rcv_has_room(po, skb);
1389                 if (room == ROOM_NORMAL ||
1390                     (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1391                         return idx;
1392                 po_skip = po;
1393         }
1394
1395         i = j = min_t(int, po->rollover->sock, num - 1);
1396         do {
1397                 po_next = pkt_sk(f->arr[i]);
1398                 if (po_next != po_skip && !po_next->pressure &&
1399                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1400                         if (i != j)
1401                                 po->rollover->sock = i;
1402                         atomic_long_inc(&po->rollover->num);
1403                         if (room == ROOM_LOW)
1404                                 atomic_long_inc(&po->rollover->num_huge);
1405                         return i;
1406                 }
1407
1408                 if (++i == num)
1409                         i = 0;
1410         } while (i != j);
1411
1412         atomic_long_inc(&po->rollover->num_failed);
1413         return idx;
1414 }
1415
1416 static unsigned int fanout_demux_qm(struct packet_fanout *f,
1417                                     struct sk_buff *skb,
1418                                     unsigned int num)
1419 {
1420         return skb_get_queue_mapping(skb) % num;
1421 }
1422
1423 static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1424                                      struct sk_buff *skb,
1425                                      unsigned int num)
1426 {
1427         struct bpf_prog *prog;
1428         unsigned int ret = 0;
1429
1430         rcu_read_lock();
1431         prog = rcu_dereference(f->bpf_prog);
1432         if (prog)
1433                 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1434         rcu_read_unlock();
1435
1436         return ret;
1437 }
1438
1439 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1440 {
1441         return f->flags & (flag >> 8);
1442 }
1443
1444 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1445                              struct packet_type *pt, struct net_device *orig_dev)
1446 {
1447         struct packet_fanout *f = pt->af_packet_priv;
1448         unsigned int num = READ_ONCE(f->num_members);
1449         struct net *net = read_pnet(&f->net);
1450         struct packet_sock *po;
1451         unsigned int idx;
1452
1453         if (!net_eq(dev_net(dev), net) || !num) {
1454                 kfree_skb(skb);
1455                 return 0;
1456         }
1457
1458         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1459                 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1460                 if (!skb)
1461                         return 0;
1462         }
1463         switch (f->type) {
1464         case PACKET_FANOUT_HASH:
1465         default:
1466                 idx = fanout_demux_hash(f, skb, num);
1467                 break;
1468         case PACKET_FANOUT_LB:
1469                 idx = fanout_demux_lb(f, skb, num);
1470                 break;
1471         case PACKET_FANOUT_CPU:
1472                 idx = fanout_demux_cpu(f, skb, num);
1473                 break;
1474         case PACKET_FANOUT_RND:
1475                 idx = fanout_demux_rnd(f, skb, num);
1476                 break;
1477         case PACKET_FANOUT_QM:
1478                 idx = fanout_demux_qm(f, skb, num);
1479                 break;
1480         case PACKET_FANOUT_ROLLOVER:
1481                 idx = fanout_demux_rollover(f, skb, 0, false, num);
1482                 break;
1483         case PACKET_FANOUT_CBPF:
1484         case PACKET_FANOUT_EBPF:
1485                 idx = fanout_demux_bpf(f, skb, num);
1486                 break;
1487         }
1488
1489         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1490                 idx = fanout_demux_rollover(f, skb, idx, true, num);
1491
1492         po = pkt_sk(f->arr[idx]);
1493         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1494 }
1495
1496 DEFINE_MUTEX(fanout_mutex);
1497 EXPORT_SYMBOL_GPL(fanout_mutex);
1498 static LIST_HEAD(fanout_list);
1499 static u16 fanout_next_id;
1500
1501 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1502 {
1503         struct packet_fanout *f = po->fanout;
1504
1505         spin_lock(&f->lock);
1506         f->arr[f->num_members] = sk;
1507         smp_wmb();
1508         f->num_members++;
1509         if (f->num_members == 1)
1510                 dev_add_pack(&f->prot_hook);
1511         spin_unlock(&f->lock);
1512 }
1513
1514 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1515 {
1516         struct packet_fanout *f = po->fanout;
1517         int i;
1518
1519         spin_lock(&f->lock);
1520         for (i = 0; i < f->num_members; i++) {
1521                 if (f->arr[i] == sk)
1522                         break;
1523         }
1524         BUG_ON(i >= f->num_members);
1525         f->arr[i] = f->arr[f->num_members - 1];
1526         f->num_members--;
1527         if (f->num_members == 0)
1528                 __dev_remove_pack(&f->prot_hook);
1529         spin_unlock(&f->lock);
1530 }
1531
1532 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1533 {
1534         if (sk->sk_family != PF_PACKET)
1535                 return false;
1536
1537         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1538 }
1539
1540 static void fanout_init_data(struct packet_fanout *f)
1541 {
1542         switch (f->type) {
1543         case PACKET_FANOUT_LB:
1544                 atomic_set(&f->rr_cur, 0);
1545                 break;
1546         case PACKET_FANOUT_CBPF:
1547         case PACKET_FANOUT_EBPF:
1548                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1549                 break;
1550         }
1551 }
1552
1553 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1554 {
1555         struct bpf_prog *old;
1556
1557         spin_lock(&f->lock);
1558         old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1559         rcu_assign_pointer(f->bpf_prog, new);
1560         spin_unlock(&f->lock);
1561
1562         if (old) {
1563                 synchronize_net();
1564                 bpf_prog_destroy(old);
1565         }
1566 }
1567
1568 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1569                                 unsigned int len)
1570 {
1571         struct bpf_prog *new;
1572         struct sock_fprog fprog;
1573         int ret;
1574
1575         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1576                 return -EPERM;
1577         if (len != sizeof(fprog))
1578                 return -EINVAL;
1579         if (copy_from_user(&fprog, data, len))
1580                 return -EFAULT;
1581
1582         ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1583         if (ret)
1584                 return ret;
1585
1586         __fanout_set_data_bpf(po->fanout, new);
1587         return 0;
1588 }
1589
1590 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1591                                 unsigned int len)
1592 {
1593         struct bpf_prog *new;
1594         u32 fd;
1595
1596         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1597                 return -EPERM;
1598         if (len != sizeof(fd))
1599                 return -EINVAL;
1600         if (copy_from_user(&fd, data, len))
1601                 return -EFAULT;
1602
1603         new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1604         if (IS_ERR(new))
1605                 return PTR_ERR(new);
1606
1607         __fanout_set_data_bpf(po->fanout, new);
1608         return 0;
1609 }
1610
1611 static int fanout_set_data(struct packet_sock *po, char __user *data,
1612                            unsigned int len)
1613 {
1614         switch (po->fanout->type) {
1615         case PACKET_FANOUT_CBPF:
1616                 return fanout_set_data_cbpf(po, data, len);
1617         case PACKET_FANOUT_EBPF:
1618                 return fanout_set_data_ebpf(po, data, len);
1619         default:
1620                 return -EINVAL;
1621         }
1622 }
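
/*
 * Example (userspace sketch, not part of this file): installing a classic
 * BPF demux program on a PACKET_FANOUT_CBPF group via PACKET_FANOUT_DATA,
 * the path handled by fanout_set_data_cbpf() above.  The one-instruction
 * program below is only illustrative; the program's return value selects
 * the group member (modulo the group size).
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/filter.h>
#include <linux/if_packet.h>
#include <sys/socket.h>

static int set_fanout_cbpf(int fd)
{
	struct sock_filter code[] = {
		{ BPF_RET | BPF_K, 0, 0, 0 },	/* always steer to member 0 */
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= code,
	};

	/* the kernel insists on exactly sizeof(struct sock_fprog), see above */
	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
			  &fprog, sizeof(fprog));
}
#endif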
1623
1624 static void fanout_release_data(struct packet_fanout *f)
1625 {
1626         switch (f->type) {
1627         case PACKET_FANOUT_CBPF:
1628         case PACKET_FANOUT_EBPF:
1629                 __fanout_set_data_bpf(f, NULL);
1630         }
1631 }
1632
1633 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1634 {
1635         struct packet_fanout *f;
1636
1637         list_for_each_entry(f, &fanout_list, list) {
1638                 if (f->id == candidate_id &&
1639                     read_pnet(&f->net) == sock_net(sk)) {
1640                         return false;
1641                 }
1642         }
1643         return true;
1644 }
1645
1646 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1647 {
1648         u16 id = fanout_next_id;
1649
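	/* try each candidate id once, wrapping around the u16 space,
	 * until we come back to where we started */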
1650         do {
1651                 if (__fanout_id_is_free(sk, id)) {
1652                         *new_id = id;
1653                         fanout_next_id = id + 1;
1654                         return true;
1655                 }
1656
1657                 id++;
1658         } while (id != fanout_next_id);
1659
1660         return false;
1661 }
1662
1663 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1664 {
1665         struct packet_rollover *rollover = NULL;
1666         struct packet_sock *po = pkt_sk(sk);
1667         struct packet_fanout *f, *match;
1668         u8 type = type_flags & 0xff;
1669         u8 flags = type_flags >> 8;
1670         int err;
1671
1672         switch (type) {
1673         case PACKET_FANOUT_ROLLOVER:
1674                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1675                         return -EINVAL;
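                /* fall through - plain ROLLOVER is still a valid mode */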
1676         case PACKET_FANOUT_HASH:
1677         case PACKET_FANOUT_LB:
1678         case PACKET_FANOUT_CPU:
1679         case PACKET_FANOUT_RND:
1680         case PACKET_FANOUT_QM:
1681         case PACKET_FANOUT_CBPF:
1682         case PACKET_FANOUT_EBPF:
1683                 break;
1684         default:
1685                 return -EINVAL;
1686         }
1687
1688         mutex_lock(&fanout_mutex);
1689
1690         err = -EINVAL;
1691         if (!po->running)
1692                 goto out;
1693
1694         err = -EALREADY;
1695         if (po->fanout)
1696                 goto out;
1697
1698         if (type == PACKET_FANOUT_ROLLOVER ||
1699             (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1700                 err = -ENOMEM;
1701                 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1702                 if (!rollover)
1703                         goto out;
1704                 atomic_long_set(&rollover->num, 0);
1705                 atomic_long_set(&rollover->num_huge, 0);
1706                 atomic_long_set(&rollover->num_failed, 0);
1707                 po->rollover = rollover;
1708         }
1709
1710         if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1711                 if (id != 0) {
1712                         err = -EINVAL;
1713                         goto out;
1714                 }
1715                 if (!fanout_find_new_id(sk, &id)) {
1716                         err = -ENOMEM;
1717                         goto out;
1718                 }
1719                 /* ephemeral flag for the first socket in the group: drop it */
1720                 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1721         }
1722
1723         match = NULL;
1724         list_for_each_entry(f, &fanout_list, list) {
1725                 if (f->id == id &&
1726                     read_pnet(&f->net) == sock_net(sk)) {
1727                         match = f;
1728                         break;
1729                 }
1730         }
1731         err = -EINVAL;
1732         if (match && match->flags != flags)
1733                 goto out;
1734         if (!match) {
1735                 err = -ENOMEM;
1736                 match = kzalloc(sizeof(*match), GFP_KERNEL);
1737                 if (!match)
1738                         goto out;
1739                 write_pnet(&match->net, sock_net(sk));
1740                 match->id = id;
1741                 match->type = type;
1742                 match->flags = flags;
1743                 INIT_LIST_HEAD(&match->list);
1744                 spin_lock_init(&match->lock);
1745                 atomic_set(&match->sk_ref, 0);
1746                 fanout_init_data(match);
1747                 match->prot_hook.type = po->prot_hook.type;
1748                 match->prot_hook.dev = po->prot_hook.dev;
1749                 match->prot_hook.func = packet_rcv_fanout;
1750                 match->prot_hook.af_packet_priv = match;
1751                 match->prot_hook.id_match = match_fanout_group;
1752                 list_add(&match->list, &fanout_list);
1753         }
1754         err = -EINVAL;
1755         if (match->type == type &&
1756             match->prot_hook.type == po->prot_hook.type &&
1757             match->prot_hook.dev == po->prot_hook.dev) {
1758                 err = -ENOSPC;
1759                 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1760                         __dev_remove_pack(&po->prot_hook);
1761                         po->fanout = match;
1762                         atomic_inc(&match->sk_ref);
1763                         __fanout_link(sk, po);
1764                         err = 0;
1765                 }
1766         }
1767 out:
1768         if (err && rollover) {
1769                 kfree(rollover);
1770                 po->rollover = NULL;
1771         }
1772         mutex_unlock(&fanout_mutex);
1773         return err;
1774 }
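
/*
 * Example (userspace sketch, not part of this file): how fanout_add() is
 * reached from userspace.  PACKET_FANOUT packs the group id into the low
 * 16 bits of the option value and type_flags into the high 16 bits; the
 * group id 42 is an arbitrary illustrative choice, and the socket itself
 * requires CAP_NET_RAW.
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int join_fanout_group(void)
{
	/* a nonzero protocol registers the prot_hook right away, which
	 * satisfies the po->running check in fanout_add() */
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int val = 42 | (PACKET_FANOUT_HASH << 16);

	if (fd < 0)
		return -1;
	/* every socket setting the same id/type joins the same group */
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)) < 0)
		return -1;
	return fd;
}
#endif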
1775
1776 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1777  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1778  * It is the responsibility of the caller to call fanout_release_data() and
1779  * free the returned packet_fanout (after synchronize_net()).
1780  */
1781 static struct packet_fanout *fanout_release(struct sock *sk)
1782 {
1783         struct packet_sock *po = pkt_sk(sk);
1784         struct packet_fanout *f;
1785
1786         mutex_lock(&fanout_mutex);
1787         f = po->fanout;
1788         if (f) {
1789                 po->fanout = NULL;
1790
1791                 if (atomic_dec_and_test(&f->sk_ref))
1792                         list_del(&f->list);
1793                 else
1794                         f = NULL;
1795
1796                 if (po->rollover)
1797                         kfree_rcu(po->rollover, rcu);
1798         }
1799         mutex_unlock(&fanout_mutex);
1800
1801         return f;
1802 }
1803
1804 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1805                                           struct sk_buff *skb)
1806 {
1807         /* Earlier code assumed this would be a VLAN pkt, double-check
1808          * this now that we have the actual packet in hand. We can only
1809          * do this check on Ethernet devices.
1810          */
1811         if (unlikely(dev->type != ARPHRD_ETHER))
1812                 return false;
1813
1814         skb_reset_mac_header(skb);
1815         return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1816 }
1817
1818 static const struct proto_ops packet_ops;
1819
1820 static const struct proto_ops packet_ops_spkt;
1821
1822 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1823                            struct packet_type *pt, struct net_device *orig_dev)
1824 {
1825         struct sock *sk;
1826         struct sockaddr_pkt *spkt;
1827
1828         /*
1829          *      When we registered the protocol we saved the socket in the data
1830          *      field for just this event.
1831          */
1832
1833         sk = pt->af_packet_priv;
1834
1835         /*
1836          *      Yank back the headers [hope the device set this
1837          *      right or kerboom...]
1838          *
1839          *      Incoming packets have the ll header pulled,
1840          *      push it back.
1841          *
1842          *      For outgoing ones skb->data == skb_mac_header(skb),
1843          *      so this procedure is a no-op.
1844          */
1845
1846         if (skb->pkt_type == PACKET_LOOPBACK)
1847                 goto out;
1848
1849         if (!net_eq(dev_net(dev), sock_net(sk)))
1850                 goto out;
1851
1852         skb = skb_share_check(skb, GFP_ATOMIC);
1853         if (skb == NULL)
1854                 goto oom;
1855
1856         /* drop any routing info */
1857         skb_dst_drop(skb);
1858
1859         /* drop conntrack reference */
1860         nf_reset(skb);
1861
1862         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1863
1864         skb_push(skb, skb->data - skb_mac_header(skb));
1865
1866         /*
1867          *      The SOCK_PACKET socket receives _all_ frames.
1868          */
1869
1870         spkt->spkt_family = dev->type;
1871         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1872         spkt->spkt_protocol = skb->protocol;
1873
1874         /*
1875          *      Charge the memory to the socket. This is done specifically
1876          *      to prevent sockets using all the memory up.
1877          */
1878
1879         if (sock_queue_rcv_skb(sk, skb) == 0)
1880                 return 0;
1881
1882 out:
1883         kfree_skb(skb);
1884 oom:
1885         return 0;
1886 }
1887
1888
1889 /*
1890  *      Output a raw packet to a device layer. This bypasses all the other
1891  *      protocol layers and you must therefore supply it with a complete frame
1892  */
1893
1894 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1895                                size_t len)
1896 {
1897         struct sock *sk = sock->sk;
1898         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1899         struct sk_buff *skb = NULL;
1900         struct net_device *dev;
1901         struct sockcm_cookie sockc;
1902         __be16 proto = 0;
1903         int err;
1904         int extra_len = 0;
1905
1906         /*
1907          *      Get and verify the address.
1908          */
1909
1910         if (saddr) {
1911                 if (msg->msg_namelen < sizeof(struct sockaddr))
1912                         return -EINVAL;
1913                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1914                         proto = saddr->spkt_protocol;
1915         } else
1916                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1917
1918         /*
1919          *      Find the device first so we can size-check against it
1920          */
1921
1922         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1923 retry:
1924         rcu_read_lock();
1925         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1926         err = -ENODEV;
1927         if (dev == NULL)
1928                 goto out_unlock;
1929
1930         err = -ENETDOWN;
1931         if (!(dev->flags & IFF_UP))
1932                 goto out_unlock;
1933
1934         /*
1935          * You may not queue a frame bigger than the mtu. This is the lowest level
1936          * raw protocol and you must do your own fragmentation at this level.
1937          */
1938
1939         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1940                 if (!netif_supports_nofcs(dev)) {
1941                         err = -EPROTONOSUPPORT;
1942                         goto out_unlock;
1943                 }
1944                 extra_len = 4; /* We're doing our own CRC */
1945         }
1946
1947         err = -EMSGSIZE;
1948         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1949                 goto out_unlock;
1950
1951         if (!skb) {
1952                 size_t reserved = LL_RESERVED_SPACE(dev);
1953                 int tlen = dev->needed_tailroom;
1954                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1955
1956                 rcu_read_unlock();
1957                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1958                 if (skb == NULL)
1959                         return -ENOBUFS;
1960                 /* FIXME: Save some space for broken drivers that write a hard
1961                  * header at transmission time by themselves. PPP is the notable
1962                  * one here. This should really be fixed at the driver level.
1963                  */
1964                 skb_reserve(skb, reserved);
1965                 skb_reset_network_header(skb);
1966
1967                 /* Try to align data part correctly */
1968                 if (hhlen) {
1969                         skb->data -= hhlen;
1970                         skb->tail -= hhlen;
1971                         if (len < hhlen)
1972                                 skb_reset_network_header(skb);
1973                 }
1974                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1975                 if (err)
1976                         goto out_free;
1977                 goto retry;
1978         }
1979
1980         if (!dev_validate_header(dev, skb->data, len)) {
1981                 err = -EINVAL;
1982                 goto out_unlock;
1983         }
1984         if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1985             !packet_extra_vlan_len_allowed(dev, skb)) {
1986                 err = -EMSGSIZE;
1987                 goto out_unlock;
1988         }
1989
1990         sockc.tsflags = sk->sk_tsflags;
1991         if (msg->msg_controllen) {
1992                 err = sock_cmsg_send(sk, msg, &sockc);
1993                 if (unlikely(err))
1994                         goto out_unlock;
1995         }
1996
1997         skb->protocol = proto;
1998         skb->dev = dev;
1999         skb->priority = sk->sk_priority;
2000         skb->mark = sk->sk_mark;
2001
2002         sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2003
2004         if (unlikely(extra_len == 4))
2005                 skb->no_fcs = 1;
2006
2007         skb_probe_transport_header(skb, 0);
2008
2009         dev_queue_xmit(skb);
2010         rcu_read_unlock();
2011         return len;
2012
2013 out_unlock:
2014         rcu_read_unlock();
2015 out_free:
2016         kfree_skb(skb);
2017         return err;
2018 }
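
/*
 * Example (userspace sketch, not part of this file): driving the legacy
 * SOCK_PACKET transmit path above with sendto() and a struct sockaddr_pkt.
 * "eth0" is an illustrative interface name; the caller must hand in a
 * complete link-layer frame, as the comment above the function says.
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>

static int spkt_send(const void *frame, size_t len)
{
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	struct sockaddr_pkt spkt;

	if (fd < 0)
		return -1;
	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
	/* namelen == sizeof(struct sockaddr_pkt) makes spkt_protocol count */
	spkt.spkt_protocol = htons(ETH_P_ALL);

	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif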
2019
2020 static unsigned int run_filter(struct sk_buff *skb,
2021                                const struct sock *sk,
2022                                unsigned int res)
2023 {
2024         struct sk_filter *filter;
2025
2026         rcu_read_lock();
2027         filter = rcu_dereference(sk->sk_filter);
2028         if (filter != NULL)
2029                 res = bpf_prog_run_clear_cb(filter->prog, skb);
2030         rcu_read_unlock();
2031
2032         return res;
2033 }
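
/*
 * Example (userspace sketch, not part of this file): attaching the classic
 * BPF socket filter that run_filter() above consults.  Returning K from a
 * classic BPF program caps the snapshot length, which is exactly how the
 * res/snaplen logic in packet_rcv()/tpacket_rcv() uses it; real programs
 * are usually generated with tcpdump -dd.
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_snap_filter(int fd)
{
	struct sock_filter code[] = {
		{ BPF_RET | BPF_K, 0, 0, 96 },	/* accept, snap to 96 bytes */
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}
#endif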
2034
2035 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2036                            size_t *len)
2037 {
2038         struct virtio_net_hdr vnet_hdr;
2039
2040         if (*len < sizeof(vnet_hdr))
2041                 return -EINVAL;
2042         *len -= sizeof(vnet_hdr);
2043
2044         if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
2045                 return -EINVAL;
2046
2047         return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2048 }
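
/*
 * Example (userspace sketch, not part of this file): reading the struct
 * virtio_net_hdr that packet_rcv_vnet() above prepends to each packet once
 * PACKET_VNET_HDR has been enabled on the socket.
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_packet.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t read_with_vnet_hdr(int fd, void *buf, size_t len)
{
	int on = 1;
	struct virtio_net_hdr vnet;
	struct iovec iov[2] = {
		{ .iov_base = &vnet, .iov_len = sizeof(vnet) },
		{ .iov_base = buf,   .iov_len = len },
	};
	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

	if (setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on)) < 0)
		return -1;
	/* the first sizeof(vnet) bytes of every message are the header */
	return recvmsg(fd, &msg, 0);
}
#endif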
2049
2050 /*
2051  * This function does lazy skb cloning in the hope that most of the
2052  * packets are discarded by BPF.
2053  *
2054  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2055  * and skb->cb are mangled. It works because (and for as long as) packets
2056  * arriving here are owned by the current CPU. Output packets are cloned
2057  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2058  * sequentially, so if we restore the skb to its original state on exit,
2059  * we will not harm anyone.
2060  */
2061
2062 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2063                       struct packet_type *pt, struct net_device *orig_dev)
2064 {
2065         struct sock *sk;
2066         struct sockaddr_ll *sll;
2067         struct packet_sock *po;
2068         u8 *skb_head = skb->data;
2069         int skb_len = skb->len;
2070         unsigned int snaplen, res;
2071         bool is_drop_n_account = false;
2072
2073         if (skb->pkt_type == PACKET_LOOPBACK)
2074                 goto drop;
2075
2076         sk = pt->af_packet_priv;
2077         po = pkt_sk(sk);
2078
2079         if (!net_eq(dev_net(dev), sock_net(sk)))
2080                 goto drop;
2081
2082         skb->dev = dev;
2083
2084         if (dev->header_ops) {
2085                 /* The device has an explicit notion of ll header,
2086                  * exported to higher levels.
2087                  *
2088                  * Otherwise, the device hides the details of its frame
2089                  * structure, so that the corresponding packet head is
2090                  * never delivered to the user.
2091                  */
2092                 if (sk->sk_type != SOCK_DGRAM)
2093                         skb_push(skb, skb->data - skb_mac_header(skb));
2094                 else if (skb->pkt_type == PACKET_OUTGOING) {
2095                         /* Special case: outgoing packets have ll header at head */
2096                         skb_pull(skb, skb_network_offset(skb));
2097                 }
2098         }
2099
2100         snaplen = skb->len;
2101
2102         res = run_filter(skb, sk, snaplen);
2103         if (!res)
2104                 goto drop_n_restore;
2105         if (snaplen > res)
2106                 snaplen = res;
2107
2108         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2109                 goto drop_n_acct;
2110
2111         if (skb_shared(skb)) {
2112                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2113                 if (nskb == NULL)
2114                         goto drop_n_acct;
2115
2116                 if (skb_head != skb->data) {
2117                         skb->data = skb_head;
2118                         skb->len = skb_len;
2119                 }
2120                 consume_skb(skb);
2121                 skb = nskb;
2122         }
2123
2124         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2125
2126         sll = &PACKET_SKB_CB(skb)->sa.ll;
2127         sll->sll_hatype = dev->type;
2128         sll->sll_pkttype = skb->pkt_type;
2129         if (unlikely(po->origdev))
2130                 sll->sll_ifindex = orig_dev->ifindex;
2131         else
2132                 sll->sll_ifindex = dev->ifindex;
2133
2134         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2135
2136         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2137          * Use their space for storing the original skb length.
2138          */
2139         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2140
2141         if (pskb_trim(skb, snaplen))
2142                 goto drop_n_acct;
2143
2144         skb_set_owner_r(skb, sk);
2145         skb->dev = NULL;
2146         skb_dst_drop(skb);
2147
2148         /* drop conntrack reference */
2149         nf_reset(skb);
2150
2151         spin_lock(&sk->sk_receive_queue.lock);
2152         po->stats.stats1.tp_packets++;
2153         sock_skb_set_dropcount(sk, skb);
2154         __skb_queue_tail(&sk->sk_receive_queue, skb);
2155         spin_unlock(&sk->sk_receive_queue.lock);
2156         sk->sk_data_ready(sk);
2157         return 0;
2158
2159 drop_n_acct:
2160         is_drop_n_account = true;
2161         spin_lock(&sk->sk_receive_queue.lock);
2162         po->stats.stats1.tp_drops++;
2163         atomic_inc(&sk->sk_drops);
2164         spin_unlock(&sk->sk_receive_queue.lock);
2165
2166 drop_n_restore:
2167         if (skb_head != skb->data && skb_shared(skb)) {
2168                 skb->data = skb_head;
2169                 skb->len = skb_len;
2170         }
2171 drop:
2172         if (!is_drop_n_account)
2173                 consume_skb(skb);
2174         else
2175                 kfree_skb(skb);
2176         return 0;
2177 }
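
/*
 * Example (userspace sketch, not part of this file): draining the queue
 * that packet_rcv() above fills.  recvfrom() hands back the sockaddr_ll
 * whose sll_ifindex/sll_pkttype packet_rcv() began filling in.
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_packet.h>
#include <sys/socket.h>

static ssize_t recv_one(int fd, void *buf, size_t len, int *ifindex)
{
	struct sockaddr_ll ll;
	socklen_t ll_len = sizeof(ll);
	ssize_t n = recvfrom(fd, buf, len, 0,
			     (struct sockaddr *)&ll, &ll_len);

	if (n >= 0 && ifindex)
		*ifindex = ll.sll_ifindex;
	return n;
}
#endif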
2178
2179 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2180                        struct packet_type *pt, struct net_device *orig_dev)
2181 {
2182         struct sock *sk;
2183         struct packet_sock *po;
2184         struct sockaddr_ll *sll;
2185         union tpacket_uhdr h;
2186         u8 *skb_head = skb->data;
2187         int skb_len = skb->len;
2188         unsigned int snaplen, res;
2189         unsigned long status = TP_STATUS_USER;
2190         unsigned short macoff, netoff, hdrlen;
2191         struct sk_buff *copy_skb = NULL;
2192         struct timespec ts;
2193         __u32 ts_status;
2194         bool is_drop_n_account = false;
2195
2196         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2197          * We may add members to these structs, up to the current aligned size,
2198          * without forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2199          */
2200         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2201         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2202
2203         if (skb->pkt_type == PACKET_LOOPBACK)
2204                 goto drop;
2205
2206         sk = pt->af_packet_priv;
2207         po = pkt_sk(sk);
2208
2209         if (!net_eq(dev_net(dev), sock_net(sk)))
2210                 goto drop;
2211
2212         if (dev->header_ops) {
2213                 if (sk->sk_type != SOCK_DGRAM)
2214                         skb_push(skb, skb->data - skb_mac_header(skb));
2215                 else if (skb->pkt_type == PACKET_OUTGOING) {
2216                         /* Special case: outgoing packets have ll header at head */
2217                         skb_pull(skb, skb_network_offset(skb));
2218                 }
2219         }
2220
2221         snaplen = skb->len;
2222
2223         res = run_filter(skb, sk, snaplen);
2224         if (!res)
2225                 goto drop_n_restore;
2226
2227         if (skb->ip_summed == CHECKSUM_PARTIAL)
2228                 status |= TP_STATUS_CSUMNOTREADY;
2229         else if (skb->pkt_type != PACKET_OUTGOING &&
2230                  (skb->ip_summed == CHECKSUM_COMPLETE ||
2231                   skb_csum_unnecessary(skb)))
2232                 status |= TP_STATUS_CSUM_VALID;
2233
2234         if (snaplen > res)
2235                 snaplen = res;
2236
2237         if (sk->sk_type == SOCK_DGRAM) {
2238                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2239                                   po->tp_reserve;
2240         } else {
2241                 unsigned int maclen = skb_network_offset(skb);
2242                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2243                                        (maclen < 16 ? 16 : maclen)) +
2244                                        po->tp_reserve;
2245                 if (po->has_vnet_hdr)
2246                         netoff += sizeof(struct virtio_net_hdr);
2247                 macoff = netoff - maclen;
2248         }
2249         if (po->tp_version <= TPACKET_V2) {
2250                 if (macoff + snaplen > po->rx_ring.frame_size) {
2251                         if (po->copy_thresh &&
2252                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2253                                 if (skb_shared(skb)) {
2254                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
2255                                 } else {
2256                                         copy_skb = skb_get(skb);
2257                                         skb_head = skb->data;
2258                                 }
2259                                 if (copy_skb)
2260                                         skb_set_owner_r(copy_skb, sk);
2261                         }
2262                         snaplen = po->rx_ring.frame_size - macoff;
2263                         if ((int)snaplen < 0)
2264                                 snaplen = 0;
2265                 }
2266         } else if (unlikely(macoff + snaplen >
2267                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2268                 u32 nval;
2269
2270                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2271                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2272                             snaplen, nval, macoff);
2273                 snaplen = nval;
2274                 if (unlikely((int)snaplen < 0)) {
2275                         snaplen = 0;
2276                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2277                 }
2278         }
2279         spin_lock(&sk->sk_receive_queue.lock);
2280         h.raw = packet_current_rx_frame(po, skb,
2281                                         TP_STATUS_KERNEL, (macoff+snaplen));
2282         if (!h.raw)
2283                 goto drop_n_account;
2284         if (po->tp_version <= TPACKET_V2) {
2285                 packet_increment_rx_head(po, &po->rx_ring);
2286         /*
2287          * LOSING will be reported until you read the stats,
2288          * because it's COR - Clear On Read.
2289          * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2290          * at the packet level.
2291          */
2292                 if (po->stats.stats1.tp_drops)
2293                         status |= TP_STATUS_LOSING;
2294         }
2295         po->stats.stats1.tp_packets++;
2296         if (copy_skb) {
2297                 status |= TP_STATUS_COPY;
2298                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2299         }
2300         spin_unlock(&sk->sk_receive_queue.lock);
2301
2302         if (po->has_vnet_hdr) {
2303                 if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2304                                             sizeof(struct virtio_net_hdr),
2305                                             vio_le(), true)) {
2306                         spin_lock(&sk->sk_receive_queue.lock);
2307                         goto drop_n_account;
2308                 }
2309         }
2310
2311         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2312
2313         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2314                 getnstimeofday(&ts);
2315
2316         status |= ts_status;
2317
2318         switch (po->tp_version) {
2319         case TPACKET_V1:
2320                 h.h1->tp_len = skb->len;
2321                 h.h1->tp_snaplen = snaplen;
2322                 h.h1->tp_mac = macoff;
2323                 h.h1->tp_net = netoff;
2324                 h.h1->tp_sec = ts.tv_sec;
2325                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2326                 hdrlen = sizeof(*h.h1);
2327                 break;
2328         case TPACKET_V2:
2329                 h.h2->tp_len = skb->len;
2330                 h.h2->tp_snaplen = snaplen;
2331                 h.h2->tp_mac = macoff;
2332                 h.h2->tp_net = netoff;
2333                 h.h2->tp_sec = ts.tv_sec;
2334                 h.h2->tp_nsec = ts.tv_nsec;
2335                 if (skb_vlan_tag_present(skb)) {
2336                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2337                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2338                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2339                 } else {
2340                         h.h2->tp_vlan_tci = 0;
2341                         h.h2->tp_vlan_tpid = 0;
2342                 }
2343                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2344                 hdrlen = sizeof(*h.h2);
2345                 break;
2346         case TPACKET_V3:
2347                 /* tp_next_offset and the vlan fields are already populated
2348                  * above, so DON'T clear them here.
2349                  */
2350                 h.h3->tp_status |= status;
2351                 h.h3->tp_len = skb->len;
2352                 h.h3->tp_snaplen = snaplen;
2353                 h.h3->tp_mac = macoff;
2354                 h.h3->tp_net = netoff;
2355                 h.h3->tp_sec  = ts.tv_sec;
2356                 h.h3->tp_nsec = ts.tv_nsec;
2357                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2358                 hdrlen = sizeof(*h.h3);
2359                 break;
2360         default:
2361                 BUG();
2362         }
2363
2364         sll = h.raw + TPACKET_ALIGN(hdrlen);
2365         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2366         sll->sll_family = AF_PACKET;
2367         sll->sll_hatype = dev->type;
2368         sll->sll_protocol = skb->protocol;
2369         sll->sll_pkttype = skb->pkt_type;
2370         if (unlikely(po->origdev))
2371                 sll->sll_ifindex = orig_dev->ifindex;
2372         else
2373                 sll->sll_ifindex = dev->ifindex;
2374
2375         smp_mb();
2376
2377 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2378         if (po->tp_version <= TPACKET_V2) {
2379                 u8 *start, *end;
2380
2381                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2382                                         macoff + snaplen);
2383
2384                 for (start = h.raw; start < end; start += PAGE_SIZE)
2385                         flush_dcache_page(pgv_to_page(start));
2386         }
2387         smp_wmb();
2388 #endif
2389
2390         if (po->tp_version <= TPACKET_V2) {
2391                 __packet_set_status(po, h.raw, status);
2392                 sk->sk_data_ready(sk);
2393         } else {
2394                 prb_clear_blk_fill_status(&po->rx_ring);
2395         }
2396
2397 drop_n_restore:
2398         if (skb_head != skb->data && skb_shared(skb)) {
2399                 skb->data = skb_head;
2400                 skb->len = skb_len;
2401         }
2402 drop:
2403         if (!is_drop_n_account)
2404                 consume_skb(skb);
2405         else
2406                 kfree_skb(skb);
2407         return 0;
2408
2409 drop_n_account:
2410         is_drop_n_account = true;
2411         po->stats.stats1.tp_drops++;
2412         spin_unlock(&sk->sk_receive_queue.lock);
2413
2414         sk->sk_data_ready(sk);
2415         kfree_skb(copy_skb);
2416         goto drop_n_restore;
2417 }
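
/*
 * Example (userspace sketch, not part of this file): a minimal TPACKET_V2
 * receive ring for tpacket_rcv() above to fill.  The geometry (one 32 KiB
 * block holding sixteen 2 KiB frames) is an arbitrary illustrative choice;
 * tp_frame_nr must equal tp_block_nr * (tp_block_size / tp_frame_size).
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_packet.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>

static int rx_ring_loop(int fd)
{
	int ver = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size	= 1 << 15,
		.tp_block_nr	= 1,
		.tp_frame_size	= 1 << 11,
		.tp_frame_nr	= 16,
	};
	unsigned int i = 0;
	char *ring;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0)
		return -1;
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0)
		return -1;
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return -1;

	for (;;) {
		struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
			(ring + (size_t)i * req.tp_frame_size);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		while (!(hdr->tp_status & TP_STATUS_USER))
			poll(&pfd, 1, -1);	/* wait for tpacket_rcv() */
		/* frame data lives at (char *)hdr + hdr->tp_mac */
		hdr->tp_status = TP_STATUS_KERNEL;	/* return the slot */
		i = (i + 1) % req.tp_frame_nr;
	}
}
#endif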
2418
2419 static void tpacket_destruct_skb(struct sk_buff *skb)
2420 {
2421         struct packet_sock *po = pkt_sk(skb->sk);
2422
2423         if (likely(po->tx_ring.pg_vec)) {
2424                 void *ph;
2425                 __u32 ts;
2426
2427                 ph = skb_shinfo(skb)->destructor_arg;
2428                 packet_dec_pending(&po->tx_ring);
2429
2430                 ts = __packet_set_timestamp(po, ph, skb);
2431                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2432         }
2433
2434         sock_wfree(skb);
2435 }
2436
2437 static void tpacket_set_protocol(const struct net_device *dev,
2438                                  struct sk_buff *skb)
2439 {
2440         if (dev->type == ARPHRD_ETHER) {
2441                 skb_reset_mac_header(skb);
2442                 skb->protocol = eth_hdr(skb)->h_proto;
2443         }
2444 }
2445
2446 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2447 {
2448         if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2449             (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2450              __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2451               __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2452                 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2453                          __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2454                         __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2455
2456         if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2457                 return -EINVAL;
2458
2459         return 0;
2460 }
2461
2462 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2463                                  struct virtio_net_hdr *vnet_hdr)
2464 {
2465         if (*len < sizeof(*vnet_hdr))
2466                 return -EINVAL;
2467         *len -= sizeof(*vnet_hdr);
2468
2469         if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2470                 return -EFAULT;
2471
2472         return __packet_snd_vnet_parse(vnet_hdr, *len);
2473 }
2474
2475 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2476                 void *frame, struct net_device *dev, void *data, int tp_len,
2477                 __be16 proto, unsigned char *addr, int hlen, int copylen,
2478                 const struct sockcm_cookie *sockc)
2479 {
2480         union tpacket_uhdr ph;
2481         int to_write, offset, len, nr_frags, len_max;
2482         struct socket *sock = po->sk.sk_socket;
2483         struct page *page;
2484         int err;
2485
2486         ph.raw = frame;
2487
2488         skb->protocol = proto;
2489         skb->dev = dev;
2490         skb->priority = po->sk.sk_priority;
2491         skb->mark = po->sk.sk_mark;
2492         sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2493         skb_shinfo(skb)->destructor_arg = ph.raw;
2494
2495         skb_reserve(skb, hlen);
2496         skb_reset_network_header(skb);
2497
2498         to_write = tp_len;
2499
2500         if (sock->type == SOCK_DGRAM) {
2501                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2502                                 NULL, tp_len);
2503                 if (unlikely(err < 0))
2504                         return -EINVAL;
2505         } else if (copylen) {
2506                 int hdrlen = min_t(int, copylen, tp_len);
2507
2508                 skb_push(skb, dev->hard_header_len);
2509                 skb_put(skb, copylen - dev->hard_header_len);
2510                 err = skb_store_bits(skb, 0, data, hdrlen);
2511                 if (unlikely(err))
2512                         return err;
2513                 if (!dev_validate_header(dev, skb->data, hdrlen))
2514                         return -EINVAL;
2515                 if (!skb->protocol)
2516                         tpacket_set_protocol(dev, skb);
2517
2518                 data += hdrlen;
2519                 to_write -= hdrlen;
2520         }
2521
2522         offset = offset_in_page(data);
2523         len_max = PAGE_SIZE - offset;
2524         len = ((to_write > len_max) ? len_max : to_write);
2525
2526         skb->data_len = to_write;
2527         skb->len += to_write;
2528         skb->truesize += to_write;
2529         atomic_add(to_write, &po->sk.sk_wmem_alloc);
2530
2531         while (likely(to_write)) {
2532                 nr_frags = skb_shinfo(skb)->nr_frags;
2533
2534                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2535                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2536                                MAX_SKB_FRAGS);
2537                         return -EFAULT;
2538                 }
2539
2540                 page = pgv_to_page(data);
2541                 data += len;
2542                 flush_dcache_page(page);
2543                 get_page(page);
2544                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2545                 to_write -= len;
2546                 offset = 0;
2547                 len_max = PAGE_SIZE;
2548                 len = ((to_write > len_max) ? len_max : to_write);
2549         }
2550
2551         skb_probe_transport_header(skb, 0);
2552
2553         return tp_len;
2554 }
2555
2556 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2557                                 int size_max, void **data)
2558 {
2559         union tpacket_uhdr ph;
2560         int tp_len, off;
2561
2562         ph.raw = frame;
2563
2564         switch (po->tp_version) {
2565         case TPACKET_V3:
2566                 if (ph.h3->tp_next_offset != 0) {
2567                         pr_warn_once("variable sized slot not supported\n");
2568                         return -EINVAL;
2569                 }
2570                 tp_len = ph.h3->tp_len;
2571                 break;
2572         case TPACKET_V2:
2573                 tp_len = ph.h2->tp_len;
2574                 break;
2575         default:
2576                 tp_len = ph.h1->tp_len;
2577                 break;
2578         }
2579         if (unlikely(tp_len > size_max)) {
2580                 pr_err("packet is too long (%d > %d)\n", tp_len, size_max);
2581                 return -EMSGSIZE;
2582         }
2583
2584         if (unlikely(po->tp_tx_has_off)) {
2585                 int off_min, off_max;
2586
2587                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2588                 off_max = po->tx_ring.frame_size - tp_len;
2589                 if (po->sk.sk_type == SOCK_DGRAM) {
2590                         switch (po->tp_version) {
2591                         case TPACKET_V3:
2592                                 off = ph.h3->tp_net;
2593                                 break;
2594                         case TPACKET_V2:
2595                                 off = ph.h2->tp_net;
2596                                 break;
2597                         default:
2598                                 off = ph.h1->tp_net;
2599                                 break;
2600                         }
2601                 } else {
2602                         switch (po->tp_version) {
2603                         case TPACKET_V3:
2604                                 off = ph.h3->tp_mac;
2605                                 break;
2606                         case TPACKET_V2:
2607                                 off = ph.h2->tp_mac;
2608                                 break;
2609                         default:
2610                                 off = ph.h1->tp_mac;
2611                                 break;
2612                         }
2613                 }
2614                 if (unlikely((off < off_min) || (off_max < off)))
2615                         return -EINVAL;
2616         } else {
2617                 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2618         }
2619
2620         *data = frame + off;
2621         return tp_len;
2622 }
2623
2624 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2625 {
2626         struct sk_buff *skb;
2627         struct net_device *dev;
2628         struct virtio_net_hdr *vnet_hdr = NULL;
2629         struct sockcm_cookie sockc;
2630         __be16 proto;
2631         int err, reserve = 0;
2632         void *ph;
2633         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2634         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2635         int tp_len, size_max;
2636         unsigned char *addr;
2637         void *data;
2638         int len_sum = 0;
2639         int status = TP_STATUS_AVAILABLE;
2640         int hlen, tlen, copylen = 0;
2641
2642         mutex_lock(&po->pg_vec_lock);
2643
2644         if (likely(saddr == NULL)) {
2645                 dev     = packet_cached_dev_get(po);
2646                 proto   = po->num;
2647                 addr    = NULL;
2648         } else {
2649                 err = -EINVAL;
2650                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2651                         goto out;
2652                 if (msg->msg_namelen < (saddr->sll_halen
2653                                         + offsetof(struct sockaddr_ll,
2654                                                 sll_addr)))
2655                         goto out;
2656                 proto   = saddr->sll_protocol;
2657                 addr    = saddr->sll_addr;
2658                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2659         }
2660
2661         sockc.tsflags = po->sk.sk_tsflags;
2662         if (msg->msg_controllen) {
2663                 err = sock_cmsg_send(&po->sk, msg, &sockc);
2664                 if (unlikely(err))
2665                         goto out;
2666         }
2667
2668         err = -ENXIO;
2669         if (unlikely(dev == NULL))
2670                 goto out;
2671         err = -ENETDOWN;
2672         if (unlikely(!(dev->flags & IFF_UP)))
2673                 goto out_put;
2674
2675         if (po->sk.sk_socket->type == SOCK_RAW)
2676                 reserve = dev->hard_header_len;
2677         size_max = po->tx_ring.frame_size
2678                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2679
2680         if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2681                 size_max = dev->mtu + reserve + VLAN_HLEN;
2682
2683         do {
2684                 ph = packet_current_frame(po, &po->tx_ring,
2685                                           TP_STATUS_SEND_REQUEST);
2686                 if (unlikely(ph == NULL)) {
2687                         if (need_wait && need_resched())
2688                                 schedule();
2689                         continue;
2690                 }
2691
2692                 skb = NULL;
2693                 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2694                 if (tp_len < 0)
2695                         goto tpacket_error;
2696
2697                 status = TP_STATUS_SEND_REQUEST;
2698                 hlen = LL_RESERVED_SPACE(dev);
2699                 tlen = dev->needed_tailroom;
2700                 if (po->has_vnet_hdr) {
2701                         vnet_hdr = data;
2702                         data += sizeof(*vnet_hdr);
2703                         tp_len -= sizeof(*vnet_hdr);
2704                         if (tp_len < 0 ||
2705                             __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2706                                 tp_len = -EINVAL;
2707                                 goto tpacket_error;
2708                         }
2709                         copylen = __virtio16_to_cpu(vio_le(),
2710                                                     vnet_hdr->hdr_len);
2711                 }
2712                 copylen = max_t(int, copylen, dev->hard_header_len);
2713                 skb = sock_alloc_send_skb(&po->sk,
2714                                 hlen + tlen + sizeof(struct sockaddr_ll) +
2715                                 (copylen - dev->hard_header_len),
2716                                 !need_wait, &err);
2717
2718                 if (unlikely(skb == NULL)) {
2719                         /* we assume the socket was initially writeable ... */
2720                         if (likely(len_sum > 0))
2721                                 err = len_sum;
2722                         goto out_status;
2723                 }
2724                 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2725                                           addr, hlen, copylen, &sockc);
2726                 if (likely(tp_len >= 0) &&
2727                     tp_len > dev->mtu + reserve &&
2728                     !po->has_vnet_hdr &&
2729                     !packet_extra_vlan_len_allowed(dev, skb))
2730                         tp_len = -EMSGSIZE;
2731
2732                 if (unlikely(tp_len < 0)) {
2733 tpacket_error:
2734                         if (po->tp_loss) {
2735                                 __packet_set_status(po, ph,
2736                                                 TP_STATUS_AVAILABLE);
2737                                 packet_increment_head(&po->tx_ring);
2738                                 kfree_skb(skb);
2739                                 continue;
2740                         } else {
2741                                 status = TP_STATUS_WRONG_FORMAT;
2742                                 err = tp_len;
2743                                 goto out_status;
2744                         }
2745                 }
2746
2747                 if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
2748                                                               vio_le())) {
2749                         tp_len = -EINVAL;
2750                         goto tpacket_error;
2751                 }
2752
2753                 packet_pick_tx_queue(dev, skb);
2754
2755                 skb->destructor = tpacket_destruct_skb;
2756                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2757                 packet_inc_pending(&po->tx_ring);
2758
2759                 status = TP_STATUS_SEND_REQUEST;
2760                 err = po->xmit(skb);
2761                 if (unlikely(err > 0)) {
2762                         err = net_xmit_errno(err);
2763                         if (err && __packet_get_status(po, ph) ==
2764                                    TP_STATUS_AVAILABLE) {
2765                                 /* skb was destructed already */
2766                                 skb = NULL;
2767                                 goto out_status;
2768                         }
2769                         /*
2770                          * skb was dropped but not destructed yet;
2771                          * let's treat it like congestion or err < 0
2772                          */
2773                         err = 0;
2774                 }
2775                 packet_increment_head(&po->tx_ring);
2776                 len_sum += tp_len;
2777         } while (likely((ph != NULL) ||
2778                 /* Note: packet_read_pending() might be slow if we have
2779                  * to call it, as it's a per-cpu variable, but on the
2780                  * fast path we already short-circuit the loop with the
2781                  * first condition and luckily don't have to take that
2782                  * path anyway.
2783                  */
2784                  (need_wait && packet_read_pending(&po->tx_ring))));
2785
2786         err = len_sum;
2787         goto out_put;
2788
2789 out_status:
2790         __packet_set_status(po, ph, status);
2791         kfree_skb(skb);
2792 out_put:
2793         dev_put(dev);
2794 out:
2795         mutex_unlock(&po->pg_vec_lock);
2796         return err;
2797 }
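
/*
 * Example (userspace sketch, not part of this file): queueing one frame
 * on a PACKET_TX_RING slot for tpacket_snd() above (TPACKET_V2 layout,
 * tp_tx_has_off not set, so per tpacket_parse_header() the data starts
 * right after the aligned header, i.e. at
 * TPACKET2_HDRLEN - sizeof(struct sockaddr_ll)).
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

static int tx_one_frame(int fd, void *slot, const void *data, unsigned int len)
{
	struct tpacket2_hdr *hdr = slot;

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return -1;		/* slot still owned by the kernel */
	memcpy((char *)slot + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
	       data, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	/* kick tpacket_snd(); MSG_DONTWAIT keeps the call non-blocking */
	return send(fd, NULL, 0, MSG_DONTWAIT);
}
#endif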
2798
2799 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2800                                         size_t reserve, size_t len,
2801                                         size_t linear, int noblock,
2802                                         int *err)
2803 {
2804         struct sk_buff *skb;
2805
2806         /* Under a page?  Don't bother with paged skb. */
2807         if (prepad + len < PAGE_SIZE || !linear)
2808                 linear = len;
2809
2810         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2811                                    err, 0);
2812         if (!skb)
2813                 return NULL;
2814
2815         skb_reserve(skb, reserve);
2816         skb_put(skb, linear);
2817         skb->data_len = len - linear;
2818         skb->len += len - linear;
2819
2820         return skb;
2821 }
2822
2823 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2824 {
2825         struct sock *sk = sock->sk;
2826         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2827         struct sk_buff *skb;
2828         struct net_device *dev;
2829         __be16 proto;
2830         unsigned char *addr;
2831         int err, reserve = 0;
2832         struct sockcm_cookie sockc;
2833         struct virtio_net_hdr vnet_hdr = { 0 };
2834         int offset = 0;
2835         struct packet_sock *po = pkt_sk(sk);
2836         int hlen, tlen, linear;
2837         int extra_len = 0;
2838
2839         /*
2840          *      Get and verify the address.
2841          */
2842
2843         if (likely(saddr == NULL)) {
2844                 dev     = packet_cached_dev_get(po);
2845                 proto   = po->num;
2846                 addr    = NULL;
2847         } else {
2848                 err = -EINVAL;
2849                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2850                         goto out;
2851                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2852                         goto out;
2853                 proto   = saddr->sll_protocol;
2854                 addr    = saddr->sll_addr;
2855                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2856         }
2857
2858         err = -ENXIO;
2859         if (unlikely(dev == NULL))
2860                 goto out_unlock;
2861         err = -ENETDOWN;
2862         if (unlikely(!(dev->flags & IFF_UP)))
2863                 goto out_unlock;
2864
2865         sockc.tsflags = sk->sk_tsflags;
2866         sockc.mark = sk->sk_mark;
2867         if (msg->msg_controllen) {
2868                 err = sock_cmsg_send(sk, msg, &sockc);
2869                 if (unlikely(err))
2870                         goto out_unlock;
2871         }
2872
2873         if (sock->type == SOCK_RAW)
2874                 reserve = dev->hard_header_len;
2875         if (po->has_vnet_hdr) {
2876                 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2877                 if (err)
2878                         goto out_unlock;
2879         }
2880
2881         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2882                 if (!netif_supports_nofcs(dev)) {
2883                         err = -EPROTONOSUPPORT;
2884                         goto out_unlock;
2885                 }
2886                 extra_len = 4; /* We're doing our own CRC */
2887         }
2888
2889         err = -EMSGSIZE;
2890         if (!vnet_hdr.gso_type &&
2891             (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2892                 goto out_unlock;
2893
2894         err = -ENOBUFS;
2895         hlen = LL_RESERVED_SPACE(dev);
2896         tlen = dev->needed_tailroom;
2897         linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2898         linear = max(linear, min_t(int, len, dev->hard_header_len));
2899         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2900                                msg->msg_flags & MSG_DONTWAIT, &err);
2901         if (skb == NULL)
2902                 goto out_unlock;
2903
2904         skb_set_network_header(skb, reserve);
2905
2906         err = -EINVAL;
2907         if (sock->type == SOCK_DGRAM) {
2908                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2909                 if (unlikely(offset < 0))
2910                         goto out_free;
2911         }
2912
2913         /* Returns -EFAULT on error */
2914         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2915         if (err)
2916                 goto out_free;
2917
2918         if (sock->type == SOCK_RAW &&
2919             !dev_validate_header(dev, skb->data, len)) {
2920                 err = -EINVAL;
2921                 goto out_free;
2922         }
2923
2924         sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2925
2926         if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2927             !packet_extra_vlan_len_allowed(dev, skb)) {
2928                 err = -EMSGSIZE;
2929                 goto out_free;
2930         }
2931
2932         skb->protocol = proto;
2933         skb->dev = dev;
2934         skb->priority = sk->sk_priority;
2935         skb->mark = sockc.mark;
2936
2937         packet_pick_tx_queue(dev, skb);
2938
2939         if (po->has_vnet_hdr) {
2940                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2941                 if (err)
2942                         goto out_free;
2943                 len += sizeof(vnet_hdr);
2944         }
2945
2946         skb_probe_transport_header(skb, reserve);
2947
2948         if (unlikely(extra_len == 4))
2949                 skb->no_fcs = 1;
2950
2951         err = po->xmit(skb);
2952         if (err > 0 && (err = net_xmit_errno(err)) != 0)
2953                 goto out_unlock;
2954
2955         dev_put(dev);
2956
2957         return len;
2958
2959 out_free:
2960         kfree_skb(skb);
2961 out_unlock:
2962         if (dev)
2963                 dev_put(dev);
2964 out:
2965         return err;
2966 }
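
/*
 * Example (userspace sketch, not part of this file): the non-ring transmit
 * path above, addressed per call with a struct sockaddr_ll rather than a
 * prior bind().  The interface index is illustrative; on a SOCK_DGRAM
 * socket sll_addr/sll_halen would also be needed so the kernel can build
 * the link-layer header via dev_hard_header().
 */
#if 0	/* illustrative userspace code, never compiled with this file */
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t snd_frame(int fd, int ifindex, const void *frame, size_t len)
{
	struct sockaddr_ll ll;

	memset(&ll, 0, sizeof(ll));
	ll.sll_family	= AF_PACKET;
	ll.sll_protocol	= htons(ETH_P_IP);
	ll.sll_ifindex	= ifindex;

	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&ll, sizeof(ll));
}
#endif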
2967
2968 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2969 {
2970         struct sock *sk = sock->sk;
2971         struct packet_sock *po = pkt_sk(sk);
2972
2973         if (po->tx_ring.pg_vec)
2974                 return tpacket_snd(po, msg);
2975         else
2976                 return packet_snd(sock, msg, len);
2977 }
2978
2979 /*
2980  *      Close a PACKET socket. This is fairly simple. We immediately go
2981  *      to 'closed' state and remove our protocol entry in the device list.
2982  */
2983
2984 static int packet_release(struct socket *sock)
2985 {
2986         struct sock *sk = sock->sk;
2987         struct packet_sock *po;
2988         struct packet_fanout *f;
2989         struct net *net;
2990         union tpacket_req_u req_u;
2991
2992         if (!sk)
2993                 return 0;
2994
2995         net = sock_net(sk);
2996         po = pkt_sk(sk);
2997
2998         mutex_lock(&net->packet.sklist_lock);
2999         sk_del_node_init_rcu(sk);
3000         mutex_unlock(&net->packet.sklist_lock);
3001
3002         preempt_disable();
3003         sock_prot_inuse_add(net, sk->sk_prot, -1);
3004         preempt_enable();
3005
3006         spin_lock(&po->bind_lock);
3007         unregister_prot_hook(sk, false);
3008         packet_cached_dev_reset(po);
3009
3010         if (po->prot_hook.dev) {
3011                 dev_put(po->prot_hook.dev);
3012                 po->prot_hook.dev = NULL;
3013         }
3014         spin_unlock(&po->bind_lock);
3015
3016         packet_flush_mclist(sk);
3017
3018         if (po->rx_ring.pg_vec) {
3019                 memset(&req_u, 0, sizeof(req_u));
3020                 packet_set_ring(sk, &req_u, 1, 0);
3021         }
3022
3023         if (po->tx_ring.pg_vec) {
3024                 memset(&req_u, 0, sizeof(req_u));
3025                 packet_set_ring(sk, &req_u, 1, 1);
3026         }
3027
3028         f = fanout_release(sk);
3029
3030         synchronize_net();
3031
3032         if (f) {
3033                 fanout_release_data(f);
3034                 kfree(f);
3035         }
3036         /*
3037          *      Now the socket is dead. No more input will appear.
3038          */
3039         sock_orphan(sk);
3040         sock->sk = NULL;
3041
3042         /* Purge queues */
3043
3044         skb_queue_purge(&sk->sk_receive_queue);
3045         packet_free_pending(po);
3046         sk_refcnt_debug_release(sk);
3047
3048         sock_put(sk);
3049         return 0;
3050 }
3051
3052 /*
3053  *      Attach a packet hook.
3054  */
3055
3056 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3057                           __be16 proto)
3058 {
3059         struct packet_sock *po = pkt_sk(sk);
3060         struct net_device *dev_curr;
3061         __be16 proto_curr;
3062         bool need_rehook;
3063         struct net_device *dev = NULL;
3064         int ret = 0;
3065         bool unlisted = false;
3066
3067         if (po->fanout)
3068                 return -EINVAL;
3069
3070         lock_sock(sk);
3071         spin_lock(&po->bind_lock);
3072         rcu_read_lock();
3073
3074         if (name) {
3075                 dev = dev_get_by_name_rcu(sock_net(sk), name);
3076                 if (!dev) {
3077                         ret = -ENODEV;
3078                         goto out_unlock;
3079                 }
3080         } else if (ifindex) {
3081                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3082                 if (!dev) {
3083                         ret = -ENODEV;
3084                         goto out_unlock;
3085                 }
3086         }
3087
3088         if (dev)
3089                 dev_hold(dev);
3090
3091         proto_curr = po->prot_hook.type;
3092         dev_curr = po->prot_hook.dev;
3093
3094         need_rehook = proto_curr != proto || dev_curr != dev;
3095
3096         if (need_rehook) {
3097                 if (po->running) {
3098                         rcu_read_unlock();
3099                         __unregister_prot_hook(sk, true);
3100                         rcu_read_lock();
3101                         dev_curr = po->prot_hook.dev;
3102                         if (dev)
3103                                 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3104                                                                  dev->ifindex);
3105                 }
3106
3107                 po->num = proto;
3108                 po->prot_hook.type = proto;
3109
3110                 if (unlikely(unlisted)) {
3111                         dev_put(dev);
3112                         po->prot_hook.dev = NULL;
3113                         po->ifindex = -1;
3114                         packet_cached_dev_reset(po);
3115                 } else {
3116                         po->prot_hook.dev = dev;
3117                         po->ifindex = dev ? dev->ifindex : 0;
3118                         packet_cached_dev_assign(po, dev);
3119                 }
3120         }
3121         if (dev_curr)
3122                 dev_put(dev_curr);
3123
3124         if (proto == 0 || !need_rehook)
3125                 goto out_unlock;
3126
3127         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3128                 register_prot_hook(sk);
3129         } else {
3130                 sk->sk_err = ENETDOWN;
3131                 if (!sock_flag(sk, SOCK_DEAD))
3132                         sk->sk_error_report(sk);
3133         }
3134
3135 out_unlock:
3136         rcu_read_unlock();
3137         spin_unlock(&po->bind_lock);
3138         release_sock(sk);
3139         return ret;
3140 }
3141
3142 /*
3143  *      Bind a packet socket to a device
3144  */
3145
3146 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3147                             int addr_len)
3148 {
3149         struct sock *sk = sock->sk;
3150         char name[sizeof(uaddr->sa_data) + 1];
3151
3152         /*
3153          *      Check legality
3154          */
3155
3156         if (addr_len != sizeof(struct sockaddr))
3157                 return -EINVAL;
3158         /* uaddr->sa_data comes from userspace; it is not guaranteed to be
3159          * zero-terminated.
3160          */
3161         memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3162         name[sizeof(uaddr->sa_data)] = 0;
3163
3164         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3165 }
3166
3167 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3168 {
3169         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3170         struct sock *sk = sock->sk;
3171
3172         /*
3173          *      Check legality
3174          */
3175
3176         if (addr_len < sizeof(struct sockaddr_ll))
3177                 return -EINVAL;
3178         if (sll->sll_family != AF_PACKET)
3179                 return -EINVAL;
3180
3181         return packet_do_bind(sk, NULL, sll->sll_ifindex,
3182                               sll->sll_protocol ? : pkt_sk(sk)->num);
3183 }
3184
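/*
 * Illustrative userspace sketch (not part of this file): the usual way
 * into packet_bind() is bind(2) with a sockaddr_ll. if_nametoindex()
 * and the interface name "eth0" are assumptions of the example.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
 *		perror("bind");
 */
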
3185 static struct proto packet_proto = {
3186         .name     = "PACKET",
3187         .owner    = THIS_MODULE,
3188         .obj_size = sizeof(struct packet_sock),
3189 };
3190
3191 /*
3192  *      Create a packet of type SOCK_PACKET.
3193  */
3194
3195 static int packet_create(struct net *net, struct socket *sock, int protocol,
3196                          int kern)
3197 {
3198         struct sock *sk;
3199         struct packet_sock *po;
3200         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3201         int err;
3202
3203         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3204                 return -EPERM;
3205         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3206             sock->type != SOCK_PACKET)
3207                 return -ESOCKTNOSUPPORT;
3208
3209         sock->state = SS_UNCONNECTED;
3210
3211         err = -ENOBUFS;
3212         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3213         if (sk == NULL)
3214                 goto out;
3215
3216         sock->ops = &packet_ops;
3217         if (sock->type == SOCK_PACKET)
3218                 sock->ops = &packet_ops_spkt;
3219
3220         sock_init_data(sock, sk);
3221
3222         po = pkt_sk(sk);
3223         sk->sk_family = PF_PACKET;
3224         po->num = proto;
3225         po->xmit = dev_queue_xmit;
3226
3227         err = packet_alloc_pending(po);
3228         if (err)
3229                 goto out2;
3230
3231         packet_cached_dev_reset(po);
3232
3233         sk->sk_destruct = packet_sock_destruct;
3234         sk_refcnt_debug_inc(sk);
3235
3236         /*
3237          *      Attach a protocol block
3238          */
3239
3240         spin_lock_init(&po->bind_lock);
3241         mutex_init(&po->pg_vec_lock);
3242         po->rollover = NULL;
3243         po->prot_hook.func = packet_rcv;
3244
3245         if (sock->type == SOCK_PACKET)
3246                 po->prot_hook.func = packet_rcv_spkt;
3247
3248         po->prot_hook.af_packet_priv = sk;
3249
3250         if (proto) {
3251                 po->prot_hook.type = proto;
3252                 register_prot_hook(sk);
3253         }
3254
3255         mutex_lock(&net->packet.sklist_lock);
3256         sk_add_node_rcu(sk, &net->packet.sklist);
3257         mutex_unlock(&net->packet.sklist_lock);
3258
3259         preempt_disable();
3260         sock_prot_inuse_add(net, &packet_proto, 1);
3261         preempt_enable();
3262
3263         return 0;
3264 out2:
3265         sk_free(sk);
3266 out:
3267         return err;
3268 }
3269
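/*
 * Illustrative userspace sketch (not part of this file): packet_create()
 * runs for socket(2) calls such as the one below, which require
 * CAP_NET_RAW as checked above. A protocol of zero is also accepted;
 * the socket then receives nothing until bound, matching the
 * "if (proto)" registration above.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */
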
3270 /*
3271  *      Pull a packet from our receive queue and hand it to the user.
3272  *      If necessary we block.
3273  */
3274
3275 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3276                           int flags)
3277 {
3278         struct sock *sk = sock->sk;
3279         struct sk_buff *skb;
3280         int copied, err;
3281         int vnet_hdr_len = 0;
3282         unsigned int origlen = 0;
3283
3284         err = -EINVAL;
3285         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3286                 goto out;
3287
3288 #if 0
3289         /* What error should we return now? EUNATTACH? */
3290         if (pkt_sk(sk)->ifindex < 0)
3291                 return -ENODEV;
3292 #endif
3293
3294         if (flags & MSG_ERRQUEUE) {
3295                 err = sock_recv_errqueue(sk, msg, len,
3296                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3297                 goto out;
3298         }
3299
3300         /*
3301          *      Call the generic datagram receiver. This handles all sorts
3302          *      of horrible races and re-entrancy so we can forget about it
3303          *      in the protocol layers.
3304          *
3305          *      Now it will return ENETDOWN if the device has just gone down,
3306          *      but then it will block.
3307          */
3308
3309         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3310
3311         /*
3312          *      If an error occurred, return it. skb_recv_datagram()
3313          *      handles the blocking for us, so we need not see or worry
3314          *      about blocking retries.
3315          */
3316
3317         if (skb == NULL)
3318                 goto out;
3319
3320         if (pkt_sk(sk)->pressure)
3321                 packet_rcv_has_room(pkt_sk(sk), NULL);
3322
3323         if (pkt_sk(sk)->has_vnet_hdr) {
3324                 err = packet_rcv_vnet(msg, skb, &len);
3325                 if (err)
3326                         goto out_free;
3327                 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3328         }
3329
3330         /* Any data beyond the buffer you gave is lost. If that worries
3331          * a user program, it can ask the device for its MTU
3332          * anyway.
3333          */
3334         copied = skb->len;
3335         if (copied > len) {
3336                 copied = len;
3337                 msg->msg_flags |= MSG_TRUNC;
3338         }
3339
3340         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3341         if (err)
3342                 goto out_free;
3343
3344         if (sock->type != SOCK_PACKET) {
3345                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3346
3347                 /* Original length was stored in sockaddr_ll fields */
3348                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3349                 sll->sll_family = AF_PACKET;
3350                 sll->sll_protocol = skb->protocol;
3351         }
3352
3353         sock_recv_ts_and_drops(msg, sk, skb);
3354
3355         if (msg->msg_name) {
3356                 /* If the address length field is there to be filled
3357                  * in, we fill it in now.
3358                  */
3359                 if (sock->type == SOCK_PACKET) {
3360                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3361                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3362                 } else {
3363                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3364
3365                         msg->msg_namelen = sll->sll_halen +
3366                                 offsetof(struct sockaddr_ll, sll_addr);
3367                 }
3368                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3369                        msg->msg_namelen);
3370         }
3371
3372         if (pkt_sk(sk)->auxdata) {
3373                 struct tpacket_auxdata aux;
3374
3375                 aux.tp_status = TP_STATUS_USER;
3376                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3377                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3378                 else if (skb->pkt_type != PACKET_OUTGOING &&
3379                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3380                           skb_csum_unnecessary(skb)))
3381                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3382
3383                 aux.tp_len = origlen;
3384                 aux.tp_snaplen = skb->len;
3385                 aux.tp_mac = 0;
3386                 aux.tp_net = skb_network_offset(skb);
3387                 if (skb_vlan_tag_present(skb)) {
3388                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3389                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3390                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3391                 } else {
3392                         aux.tp_vlan_tci = 0;
3393                         aux.tp_vlan_tpid = 0;
3394                 }
3395                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3396         }
3397
3398         /*
3399          *      Free or return the buffer as appropriate. Again this
3400          *      hides all the races and re-entrancy issues from us.
3401          */
3402         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3403
3404 out_free:
3405         skb_free_datagram(sk, skb);
3406 out:
3407         return err;
3408 }
3409
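/*
 * Illustrative userspace sketch (not part of this file): with
 * PACKET_AUXDATA enabled, the metadata filled in above arrives as a
 * control message on every recvmsg(2). "fd" and "buf" are assumptions
 * of the example.
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *			... inspect aux->tp_len, aux->tp_vlan_tci, ...
 *		}
 */
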
3410 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3411                                int *uaddr_len, int peer)
3412 {
3413         struct net_device *dev;
3414         struct sock *sk = sock->sk;
3415
3416         if (peer)
3417                 return -EOPNOTSUPP;
3418
3419         uaddr->sa_family = AF_PACKET;
3420         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3421         rcu_read_lock();
3422         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3423         if (dev)
3424                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3425         rcu_read_unlock();
3426         *uaddr_len = sizeof(*uaddr);
3427
3428         return 0;
3429 }
3430
3431 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3432                           int *uaddr_len, int peer)
3433 {
3434         struct net_device *dev;
3435         struct sock *sk = sock->sk;
3436         struct packet_sock *po = pkt_sk(sk);
3437         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3438
3439         if (peer)
3440                 return -EOPNOTSUPP;
3441
3442         sll->sll_family = AF_PACKET;
3443         sll->sll_ifindex = po->ifindex;
3444         sll->sll_protocol = po->num;
3445         sll->sll_pkttype = 0;
3446         rcu_read_lock();
3447         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3448         if (dev) {
3449                 sll->sll_hatype = dev->type;
3450                 sll->sll_halen = dev->addr_len;
3451                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3452         } else {
3453                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3454                 sll->sll_halen = 0;
3455         }
3456         rcu_read_unlock();
3457         *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3458
3459         return 0;
3460 }
3461
3462 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3463                          int what)
3464 {
3465         switch (i->type) {
3466         case PACKET_MR_MULTICAST:
3467                 if (i->alen != dev->addr_len)
3468                         return -EINVAL;
3469                 if (what > 0)
3470                         return dev_mc_add(dev, i->addr);
3471                 else
3472                         return dev_mc_del(dev, i->addr);
3473                 break;
3474         case PACKET_MR_PROMISC:
3475                 return dev_set_promiscuity(dev, what);
3476         case PACKET_MR_ALLMULTI:
3477                 return dev_set_allmulti(dev, what);
3478         case PACKET_MR_UNICAST:
3479                 if (i->alen != dev->addr_len)
3480                         return -EINVAL;
3481                 if (what > 0)
3482                         return dev_uc_add(dev, i->addr);
3483                 else
3484                         return dev_uc_del(dev, i->addr);
3485                 break;
3486         default:
3487                 break;
3488         }
3489         return 0;
3490 }
3491
3492 static void packet_dev_mclist_delete(struct net_device *dev,
3493                                      struct packet_mclist **mlp)
3494 {
3495         struct packet_mclist *ml;
3496
3497         while ((ml = *mlp) != NULL) {
3498                 if (ml->ifindex == dev->ifindex) {
3499                         packet_dev_mc(dev, ml, -1);
3500                         *mlp = ml->next;
3501                         kfree(ml);
3502                 } else
3503                         mlp = &ml->next;
3504         }
3505 }
3506
3507 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3508 {
3509         struct packet_sock *po = pkt_sk(sk);
3510         struct packet_mclist *ml, *i;
3511         struct net_device *dev;
3512         int err;
3513
3514         rtnl_lock();
3515
3516         err = -ENODEV;
3517         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3518         if (!dev)
3519                 goto done;
3520
3521         err = -EINVAL;
3522         if (mreq->mr_alen > dev->addr_len)
3523                 goto done;
3524
3525         err = -ENOBUFS;
3526         i = kmalloc(sizeof(*i), GFP_KERNEL);
3527         if (i == NULL)
3528                 goto done;
3529
3530         err = 0;
3531         for (ml = po->mclist; ml; ml = ml->next) {
3532                 if (ml->ifindex == mreq->mr_ifindex &&
3533                     ml->type == mreq->mr_type &&
3534                     ml->alen == mreq->mr_alen &&
3535                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3536                         ml->count++;
3537                         /* Free the new element ... */
3538                         kfree(i);
3539                         goto done;
3540                 }
3541         }
3542
3543         i->type = mreq->mr_type;
3544         i->ifindex = mreq->mr_ifindex;
3545         i->alen = mreq->mr_alen;
3546         memcpy(i->addr, mreq->mr_address, i->alen);
3547         memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3548         i->count = 1;
3549         i->next = po->mclist;
3550         po->mclist = i;
3551         err = packet_dev_mc(dev, i, 1);
3552         if (err) {
3553                 po->mclist = i->next;
3554                 kfree(i);
3555         }
3556
3557 done:
3558         rtnl_unlock();
3559         return err;
3560 }
3561
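/*
 * Illustrative userspace sketch (not part of this file): packet_mc_add()
 * serves PACKET_ADD_MEMBERSHIP, e.g. to enable promiscuous mode on one
 * interface with proper reference counting. "ifindex" is an assumption
 * of the example.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */
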
3562 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3563 {
3564         struct packet_mclist *ml, **mlp;
3565
3566         rtnl_lock();
3567
3568         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3569                 if (ml->ifindex == mreq->mr_ifindex &&
3570                     ml->type == mreq->mr_type &&
3571                     ml->alen == mreq->mr_alen &&
3572                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3573                         if (--ml->count == 0) {
3574                                 struct net_device *dev;
3575                                 *mlp = ml->next;
3576                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3577                                 if (dev)
3578                                         packet_dev_mc(dev, ml, -1);
3579                                 kfree(ml);
3580                         }
3581                         break;
3582                 }
3583         }
3584         rtnl_unlock();
3585         return 0;
3586 }
3587
3588 static void packet_flush_mclist(struct sock *sk)
3589 {
3590         struct packet_sock *po = pkt_sk(sk);
3591         struct packet_mclist *ml;
3592
3593         if (!po->mclist)
3594                 return;
3595
3596         rtnl_lock();
3597         while ((ml = po->mclist) != NULL) {
3598                 struct net_device *dev;
3599
3600                 po->mclist = ml->next;
3601                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3602                 if (dev != NULL)
3603                         packet_dev_mc(dev, ml, -1);
3604                 kfree(ml);
3605         }
3606         rtnl_unlock();
3607 }
3608
3609 static int
3610 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3611 {
3612         struct sock *sk = sock->sk;
3613         struct packet_sock *po = pkt_sk(sk);
3614         int ret;
3615
3616         if (level != SOL_PACKET)
3617                 return -ENOPROTOOPT;
3618
3619         switch (optname) {
3620         case PACKET_ADD_MEMBERSHIP:
3621         case PACKET_DROP_MEMBERSHIP:
3622         {
3623                 struct packet_mreq_max mreq;
3624                 int len = optlen;
3625                 memset(&mreq, 0, sizeof(mreq));
3626                 if (len < sizeof(struct packet_mreq))
3627                         return -EINVAL;
3628                 if (len > sizeof(mreq))
3629                         len = sizeof(mreq);
3630                 if (copy_from_user(&mreq, optval, len))
3631                         return -EFAULT;
3632                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3633                         return -EINVAL;
3634                 if (optname == PACKET_ADD_MEMBERSHIP)
3635                         ret = packet_mc_add(sk, &mreq);
3636                 else
3637                         ret = packet_mc_drop(sk, &mreq);
3638                 return ret;
3639         }
3640
3641         case PACKET_RX_RING:
3642         case PACKET_TX_RING:
3643         {
3644                 union tpacket_req_u req_u;
3645                 int len;
3646
3647                 switch (po->tp_version) {
3648                 case TPACKET_V1:
3649                 case TPACKET_V2:
3650                         len = sizeof(req_u.req);
3651                         break;
3652                 case TPACKET_V3:
3653                 default:
3654                         len = sizeof(req_u.req3);
3655                         break;
3656                 }
3657                 if (optlen < len)
3658                         return -EINVAL;
3659                 if (copy_from_user(&req_u.req, optval, len))
3660                         return -EFAULT;
3661                 return packet_set_ring(sk, &req_u, 0,
3662                         optname == PACKET_TX_RING);
3663         }
3664         case PACKET_COPY_THRESH:
3665         {
3666                 int val;
3667
3668                 if (optlen != sizeof(val))
3669                         return -EINVAL;
3670                 if (copy_from_user(&val, optval, sizeof(val)))
3671                         return -EFAULT;
3672
3673                 pkt_sk(sk)->copy_thresh = val;
3674                 return 0;
3675         }
3676         case PACKET_VERSION:
3677         {
3678                 int val;
3679
3680                 if (optlen != sizeof(val))
3681                         return -EINVAL;
3682                 if (copy_from_user(&val, optval, sizeof(val)))
3683                         return -EFAULT;
3684                 switch (val) {
3685                 case TPACKET_V1:
3686                 case TPACKET_V2:
3687                 case TPACKET_V3:
3688                         break;
3689                 default:
3690                         return -EINVAL;
3691                 }
3692                 lock_sock(sk);
3693                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3694                         ret = -EBUSY;
3695                 } else {
3696                         po->tp_version = val;
3697                         ret = 0;
3698                 }
3699                 release_sock(sk);
3700                 return ret;
3701         }
3702         case PACKET_RESERVE:
3703         {
3704                 unsigned int val;
3705
3706                 if (optlen != sizeof(val))
3707                         return -EINVAL;
3708                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3709                         return -EBUSY;
3710                 if (copy_from_user(&val, optval, sizeof(val)))
3711                         return -EFAULT;
3712                 if (val > INT_MAX)
3713                         return -EINVAL;
3714                 po->tp_reserve = val;
3715                 return 0;
3716         }
3717         case PACKET_LOSS:
3718         {
3719                 unsigned int val;
3720
3721                 if (optlen != sizeof(val))
3722                         return -EINVAL;
3723                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3724                         return -EBUSY;
3725                 if (copy_from_user(&val, optval, sizeof(val)))
3726                         return -EFAULT;
3727                 po->tp_loss = !!val;
3728                 return 0;
3729         }
3730         case PACKET_AUXDATA:
3731         {
3732                 int val;
3733
3734                 if (optlen < sizeof(val))
3735                         return -EINVAL;
3736                 if (copy_from_user(&val, optval, sizeof(val)))
3737                         return -EFAULT;
3738
3739                 po->auxdata = !!val;
3740                 return 0;
3741         }
3742         case PACKET_ORIGDEV:
3743         {
3744                 int val;
3745
3746                 if (optlen < sizeof(val))
3747                         return -EINVAL;
3748                 if (copy_from_user(&val, optval, sizeof(val)))
3749                         return -EFAULT;
3750
3751                 po->origdev = !!val;
3752                 return 0;
3753         }
3754         case PACKET_VNET_HDR:
3755         {
3756                 int val;
3757
3758                 if (sock->type != SOCK_RAW)
3759                         return -EINVAL;
3760                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3761                         return -EBUSY;
3762                 if (optlen < sizeof(val))
3763                         return -EINVAL;
3764                 if (copy_from_user(&val, optval, sizeof(val)))
3765                         return -EFAULT;
3766
3767                 po->has_vnet_hdr = !!val;
3768                 return 0;
3769         }
3770         case PACKET_TIMESTAMP:
3771         {
3772                 int val;
3773
3774                 if (optlen != sizeof(val))
3775                         return -EINVAL;
3776                 if (copy_from_user(&val, optval, sizeof(val)))
3777                         return -EFAULT;
3778
3779                 po->tp_tstamp = val;
3780                 return 0;
3781         }
3782         case PACKET_FANOUT:
3783         {
3784                 int val;
3785
3786                 if (optlen != sizeof(val))
3787                         return -EINVAL;
3788                 if (copy_from_user(&val, optval, sizeof(val)))
3789                         return -EFAULT;
3790
3791                 return fanout_add(sk, val & 0xffff, val >> 16);
3792         }
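        /*
         * Illustrative note (userspace, not part of this file): the
         * option value packs the fanout group id into the low 16 bits
         * and the mode plus flags into the high bits, matching the
         * split above, e.g.:
         *
         *	int val = group_id | (PACKET_FANOUT_HASH << 16);
         *
         *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
         *		   &val, sizeof(val));
         */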
3793         case PACKET_FANOUT_DATA:
3794         {
3795                 if (!po->fanout)
3796                         return -EINVAL;
3797
3798                 return fanout_set_data(po, optval, optlen);
3799         }
3800         case PACKET_TX_HAS_OFF:
3801         {
3802                 unsigned int val;
3803
3804                 if (optlen != sizeof(val))
3805                         return -EINVAL;
3806                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3807                         return -EBUSY;
3808                 if (copy_from_user(&val, optval, sizeof(val)))
3809                         return -EFAULT;
3810                 po->tp_tx_has_off = !!val;
3811                 return 0;
3812         }
3813         case PACKET_QDISC_BYPASS:
3814         {
3815                 int val;
3816
3817                 if (optlen != sizeof(val))
3818                         return -EINVAL;
3819                 if (copy_from_user(&val, optval, sizeof(val)))
3820                         return -EFAULT;
3821
3822                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3823                 return 0;
3824         }
3825         default:
3826                 return -ENOPROTOOPT;
3827         }
3828 }
3829
3830 static int packet_getsockopt(struct socket *sock, int level, int optname,
3831                              char __user *optval, int __user *optlen)
3832 {
3833         int len;
3834         int val, lv = sizeof(val);
3835         struct sock *sk = sock->sk;
3836         struct packet_sock *po = pkt_sk(sk);
3837         void *data = &val;
3838         union tpacket_stats_u st;
3839         struct tpacket_rollover_stats rstats;
3840
3841         if (level != SOL_PACKET)
3842                 return -ENOPROTOOPT;
3843
3844         if (get_user(len, optlen))
3845                 return -EFAULT;
3846
3847         if (len < 0)
3848                 return -EINVAL;
3849
3850         switch (optname) {
3851         case PACKET_STATISTICS:
3852                 spin_lock_bh(&sk->sk_receive_queue.lock);
3853                 memcpy(&st, &po->stats, sizeof(st));
3854                 memset(&po->stats, 0, sizeof(po->stats));
3855                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3856
3857                 if (po->tp_version == TPACKET_V3) {
3858                         lv = sizeof(struct tpacket_stats_v3);
3859                         st.stats3.tp_packets += st.stats3.tp_drops;
3860                         data = &st.stats3;
3861                 } else {
3862                         lv = sizeof(struct tpacket_stats);
3863                         st.stats1.tp_packets += st.stats1.tp_drops;
3864                         data = &st.stats1;
3865                 }
3866
3867                 break;
3868         case PACKET_AUXDATA:
3869                 val = po->auxdata;
3870                 break;
3871         case PACKET_ORIGDEV:
3872                 val = po->origdev;
3873                 break;
3874         case PACKET_VNET_HDR:
3875                 val = po->has_vnet_hdr;
3876                 break;
3877         case PACKET_VERSION:
3878                 val = po->tp_version;
3879                 break;
3880         case PACKET_HDRLEN:
3881                 if (len > sizeof(int))
3882                         len = sizeof(int);
3883                 if (len < sizeof(int))
3884                         return -EINVAL;
3885                 if (copy_from_user(&val, optval, len))
3886                         return -EFAULT;
3887                 switch (val) {
3888                 case TPACKET_V1:
3889                         val = sizeof(struct tpacket_hdr);
3890                         break;
3891                 case TPACKET_V2:
3892                         val = sizeof(struct tpacket2_hdr);
3893                         break;
3894                 case TPACKET_V3:
3895                         val = sizeof(struct tpacket3_hdr);
3896                         break;
3897                 default:
3898                         return -EINVAL;
3899                 }
3900                 break;
3901         case PACKET_RESERVE:
3902                 val = po->tp_reserve;
3903                 break;
3904         case PACKET_LOSS:
3905                 val = po->tp_loss;
3906                 break;
3907         case PACKET_TIMESTAMP:
3908                 val = po->tp_tstamp;
3909                 break;
3910         case PACKET_FANOUT:
3911                 val = (po->fanout ?
3912                        ((u32)po->fanout->id |
3913                         ((u32)po->fanout->type << 16) |
3914                         ((u32)po->fanout->flags << 24)) :
3915                        0);
3916                 break;
3917         case PACKET_ROLLOVER_STATS:
3918                 if (!po->rollover)
3919                         return -EINVAL;
3920                 rstats.tp_all = atomic_long_read(&po->rollover->num);
3921                 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3922                 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3923                 data = &rstats;
3924                 lv = sizeof(rstats);
3925                 break;
3926         case PACKET_TX_HAS_OFF:
3927                 val = po->tp_tx_has_off;
3928                 break;
3929         case PACKET_QDISC_BYPASS:
3930                 val = packet_use_direct_xmit(po);
3931                 break;
3932         default:
3933                 return -ENOPROTOOPT;
3934         }
3935
3936         if (len > lv)
3937                 len = lv;
3938         if (put_user(len, optlen))
3939                 return -EFAULT;
3940         if (copy_to_user(optval, data, len))
3941                 return -EFAULT;
3942         return 0;
3943 }
3944
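/*
 * Illustrative userspace sketch (not part of this file): note that
 * reading PACKET_STATISTICS also zeroes the counters, as done under
 * the receive-queue lock above, so each call reports the interval
 * since the previous one.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("packets %u, drops %u\n", st.tp_packets, st.tp_drops);
 */
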
3945
3946 #ifdef CONFIG_COMPAT
3947 static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3948                                     char __user *optval, unsigned int optlen)
3949 {
3950         struct packet_sock *po = pkt_sk(sock->sk);
3951
3952         if (level != SOL_PACKET)
3953                 return -ENOPROTOOPT;
3954
3955         if (optname == PACKET_FANOUT_DATA &&
3956             po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3957                 optval = (char __user *)get_compat_bpf_fprog(optval);
3958                 if (!optval)
3959                         return -EFAULT;
3960                 optlen = sizeof(struct sock_fprog);
3961         }
3962
3963         return packet_setsockopt(sock, level, optname, optval, optlen);
3964 }
3965 #endif
3966
3967 static int packet_notifier(struct notifier_block *this,
3968                            unsigned long msg, void *ptr)
3969 {
3970         struct sock *sk;
3971         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3972         struct net *net = dev_net(dev);
3973
3974         rcu_read_lock();
3975         sk_for_each_rcu(sk, &net->packet.sklist) {
3976                 struct packet_sock *po = pkt_sk(sk);
3977
3978                 switch (msg) {
3979                 case NETDEV_UNREGISTER:
3980                         if (po->mclist)
3981                                 packet_dev_mclist_delete(dev, &po->mclist);
3982                         /* fallthrough */
3983
3984                 case NETDEV_DOWN:
3985                         if (dev->ifindex == po->ifindex) {
3986                                 spin_lock(&po->bind_lock);
3987                                 if (po->running) {
3988                                         __unregister_prot_hook(sk, false);
3989                                         sk->sk_err = ENETDOWN;
3990                                         if (!sock_flag(sk, SOCK_DEAD))
3991                                                 sk->sk_error_report(sk);
3992                                 }
3993                                 if (msg == NETDEV_UNREGISTER) {
3994                                         packet_cached_dev_reset(po);
3995                                         po->ifindex = -1;
3996                                         if (po->prot_hook.dev)
3997                                                 dev_put(po->prot_hook.dev);
3998                                         po->prot_hook.dev = NULL;
3999                                 }
4000                                 spin_unlock(&po->bind_lock);
4001                         }
4002                         break;
4003                 case NETDEV_UP:
4004                         if (dev->ifindex == po->ifindex) {
4005                                 spin_lock(&po->bind_lock);
4006                                 if (po->num)
4007                                         register_prot_hook(sk);
4008                                 spin_unlock(&po->bind_lock);
4009                         }
4010                         break;
4011                 }
4012         }
4013         rcu_read_unlock();
4014         return NOTIFY_DONE;
4015 }
4016
4017
4018 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4019                         unsigned long arg)
4020 {
4021         struct sock *sk = sock->sk;
4022
4023         switch (cmd) {
4024         case SIOCOUTQ:
4025         {
4026                 int amount = sk_wmem_alloc_get(sk);
4027
4028                 return put_user(amount, (int __user *)arg);
4029         }
4030         case SIOCINQ:
4031         {
4032                 struct sk_buff *skb;
4033                 int amount = 0;
4034
4035                 spin_lock_bh(&sk->sk_receive_queue.lock);
4036                 skb = skb_peek(&sk->sk_receive_queue);
4037                 if (skb)
4038                         amount = skb->len;
4039                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4040                 return put_user(amount, (int __user *)arg);
4041         }
4042         case SIOCGSTAMP:
4043                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4044         case SIOCGSTAMPNS:
4045                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
4046
4047 #ifdef CONFIG_INET
4048         case SIOCADDRT:
4049         case SIOCDELRT:
4050         case SIOCDARP:
4051         case SIOCGARP:
4052         case SIOCSARP:
4053         case SIOCGIFADDR:
4054         case SIOCSIFADDR:
4055         case SIOCGIFBRDADDR:
4056         case SIOCSIFBRDADDR:
4057         case SIOCGIFNETMASK:
4058         case SIOCSIFNETMASK:
4059         case SIOCGIFDSTADDR:
4060         case SIOCSIFDSTADDR:
4061         case SIOCSIFFLAGS:
4062                 return inet_dgram_ops.ioctl(sock, cmd, arg);
4063 #endif
4064
4065         default:
4066                 return -ENOIOCTLCMD;
4067         }
4068         return 0;
4069 }
4070
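/*
 * Illustrative userspace sketch (not part of this file): note that
 * SIOCINQ as implemented above reports the length of the next queued
 * frame, not the total backlog.
 *
 *	int next_len = 0;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 */
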
4071 static unsigned int packet_poll(struct file *file, struct socket *sock,
4072                                 poll_table *wait)
4073 {
4074         struct sock *sk = sock->sk;
4075         struct packet_sock *po = pkt_sk(sk);
4076         unsigned int mask = datagram_poll(file, sock, wait);
4077
4078         spin_lock_bh(&sk->sk_receive_queue.lock);
4079         if (po->rx_ring.pg_vec) {
4080                 if (!packet_previous_rx_frame(po, &po->rx_ring,
4081                         TP_STATUS_KERNEL))
4082                         mask |= POLLIN | POLLRDNORM;
4083         }
4084         if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4085                 po->pressure = 0;
4086         spin_unlock_bh(&sk->sk_receive_queue.lock);
4087         spin_lock_bh(&sk->sk_write_queue.lock);
4088         if (po->tx_ring.pg_vec) {
4089                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4090                         mask |= POLLOUT | POLLWRNORM;
4091         }
4092         spin_unlock_bh(&sk->sk_write_queue.lock);
4093         return mask;
4094 }
4095
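/*
 * Illustrative userspace sketch (not part of this file): with an RX
 * ring mapped, a reader typically polls and then walks frames whose
 * status the kernel has flipped to TP_STATUS_USER; shown here for a
 * TPACKET_V1 ring, with "ring", "i" and "frame_size" bookkeeping
 * assumed.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *	struct tpacket_hdr *hdr;
 *
 *	poll(&pfd, 1, -1);
 *	hdr = (struct tpacket_hdr *)(ring + i * frame_size);
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		... consume the frame ...
 *		hdr->tp_status = TP_STATUS_KERNEL;	(hand it back)
 *		... advance i and recompute hdr ...
 *	}
 */
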
4096
4097 /* Dirty? Well, I still have not learned a better way to account
4098  * for user mmaps.
4099  */
4100
4101 static void packet_mm_open(struct vm_area_struct *vma)
4102 {
4103         struct file *file = vma->vm_file;
4104         struct socket *sock = file->private_data;
4105         struct sock *sk = sock->sk;
4106
4107         if (sk)
4108                 atomic_inc(&pkt_sk(sk)->mapped);
4109 }
4110
4111 static void packet_mm_close(struct vm_area_struct *vma)
4112 {
4113         struct file *file = vma->vm_file;
4114         struct socket *sock = file->private_data;
4115         struct sock *sk = sock->sk;
4116
4117         if (sk)
4118                 atomic_dec(&pkt_sk(sk)->mapped);
4119 }
4120
4121 static const struct vm_operations_struct packet_mmap_ops = {
4122         .open   =       packet_mm_open,
4123         .close  =       packet_mm_close,
4124 };
4125
4126 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4127                         unsigned int len)
4128 {
4129         int i;
4130
4131         for (i = 0; i < len; i++) {
4132                 if (likely(pg_vec[i].buffer)) {
4133                         if (is_vmalloc_addr(pg_vec[i].buffer))
4134                                 vfree(pg_vec[i].buffer);
4135                         else
4136                                 free_pages((unsigned long)pg_vec[i].buffer,
4137                                            order);
4138                         pg_vec[i].buffer = NULL;
4139                 }
4140         }
4141         kfree(pg_vec);
4142 }
4143
4144 static char *alloc_one_pg_vec_page(unsigned long order)
4145 {
4146         char *buffer;
4147         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4148                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4149
4150         buffer = (char *) __get_free_pages(gfp_flags, order);
4151         if (buffer)
4152                 return buffer;
4153
4154         /* __get_free_pages failed, fall back to vmalloc */
4155         buffer = vzalloc((1 << order) * PAGE_SIZE);
4156         if (buffer)
4157                 return buffer;
4158
4159         /* vmalloc failed, let's dig into swap here */
4160         gfp_flags &= ~__GFP_NORETRY;
4161         buffer = (char *) __get_free_pages(gfp_flags, order);
4162         if (buffer)
4163                 return buffer;
4164
4165         /* complete and utter failure */
4166         return NULL;
4167 }
4168
4169 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4170 {
4171         unsigned int block_nr = req->tp_block_nr;
4172         struct pgv *pg_vec;
4173         int i;
4174
4175         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4176         if (unlikely(!pg_vec))
4177                 goto out;
4178
4179         for (i = 0; i < block_nr; i++) {
4180                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4181                 if (unlikely(!pg_vec[i].buffer))
4182                         goto out_free_pgvec;
4183         }
4184
4185 out:
4186         return pg_vec;
4187
4188 out_free_pgvec:
4189         free_pg_vec(pg_vec, order, block_nr);
4190         pg_vec = NULL;
4191         goto out;
4192 }
4193
4194 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4195                 int closing, int tx_ring)
4196 {
4197         struct pgv *pg_vec = NULL;
4198         struct packet_sock *po = pkt_sk(sk);
4199         int was_running, order = 0;
4200         struct packet_ring_buffer *rb;
4201         struct sk_buff_head *rb_queue;
4202         __be16 num;
4203         int err = -EINVAL;
4204         /* Alias to minimize code churn */
4205         struct tpacket_req *req = &req_u->req;
4206
4207         lock_sock(sk);
4208
4209         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4210         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4211
4212         err = -EBUSY;
4213         if (!closing) {
4214                 if (atomic_read(&po->mapped))
4215                         goto out;
4216                 if (packet_read_pending(rb))
4217                         goto out;
4218         }
4219
4220         if (req->tp_block_nr) {
4221                 /* Sanity tests and some calculations */
4222                 err = -EBUSY;
4223                 if (unlikely(rb->pg_vec))
4224                         goto out;
4225
4226                 switch (po->tp_version) {
4227                 case TPACKET_V1:
4228                         po->tp_hdrlen = TPACKET_HDRLEN;
4229                         break;
4230                 case TPACKET_V2:
4231                         po->tp_hdrlen = TPACKET2_HDRLEN;
4232                         break;
4233                 case TPACKET_V3:
4234                         po->tp_hdrlen = TPACKET3_HDRLEN;
4235                         break;
4236                 }
4237
4238                 err = -EINVAL;
4239                 if (unlikely((int)req->tp_block_size <= 0))
4240                         goto out;
4241                 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4242                         goto out;
4243                 if (po->tp_version >= TPACKET_V3 &&
4244                     req->tp_block_size <=
4245                           BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4246                         goto out;
4247                 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4248                                         po->tp_reserve))
4249                         goto out;
4250                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4251                         goto out;
4252
4253                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4254                 if (unlikely(rb->frames_per_block == 0))
4255                         goto out;
4256                 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4257                         goto out;
4258                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4259                                         req->tp_frame_nr))
4260                         goto out;
4261
4262                 err = -ENOMEM;
4263                 order = get_order(req->tp_block_size);
4264                 pg_vec = alloc_pg_vec(req, order);
4265                 if (unlikely(!pg_vec))
4266                         goto out;
4267                 switch (po->tp_version) {
4268                 case TPACKET_V3:
4269                         /* Block transmit is not supported yet */
4270                         if (!tx_ring) {
4271                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4272                         } else {
4273                                 struct tpacket_req3 *req3 = &req_u->req3;
4274
4275                                 if (req3->tp_retire_blk_tov ||
4276                                     req3->tp_sizeof_priv ||
4277                                     req3->tp_feature_req_word) {
4278                                         err = -EINVAL;
4279                                         goto out;
4280                                 }
4281                         }
4282                         break;
4283                 default:
4284                         break;
4285                 }
4286         }
4287         /* Done; tp_block_nr == 0 means tear down any existing ring */
4288         else {
4289                 err = -EINVAL;
4290                 if (unlikely(req->tp_frame_nr))
4291                         goto out;
4292         }
4293
4294
4295         /* Detach socket from network */
4296         spin_lock(&po->bind_lock);
4297         was_running = po->running;
4298         num = po->num;
4299         if (was_running) {
4300                 po->num = 0;
4301                 __unregister_prot_hook(sk, false);
4302         }
4303         spin_unlock(&po->bind_lock);
4304
4305         synchronize_net();
4306
4307         err = -EBUSY;
4308         mutex_lock(&po->pg_vec_lock);
4309         if (closing || atomic_read(&po->mapped) == 0) {
4310                 err = 0;
4311                 spin_lock_bh(&rb_queue->lock);
4312                 swap(rb->pg_vec, pg_vec);
4313                 rb->frame_max = (req->tp_frame_nr - 1);
4314                 rb->head = 0;
4315                 rb->frame_size = req->tp_frame_size;
4316                 spin_unlock_bh(&rb_queue->lock);
4317
4318                 swap(rb->pg_vec_order, order);
4319                 swap(rb->pg_vec_len, req->tp_block_nr);
4320
4321                 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4322                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4323                                                 tpacket_rcv : packet_rcv;
4324                 skb_queue_purge(rb_queue);
4325                 if (atomic_read(&po->mapped))
4326                         pr_err("packet_mmap: vma is busy: %d\n",
4327                                atomic_read(&po->mapped));
4328         }
4329         mutex_unlock(&po->pg_vec_lock);
4330
4331         spin_lock(&po->bind_lock);
4332         if (was_running) {
4333                 po->num = num;
4334                 register_prot_hook(sk);
4335         }
4336         spin_unlock(&po->bind_lock);
4337         if (closing && (po->tp_version > TPACKET_V2)) {
4338                 /* Because block-based V3 is not supported on the tx ring */
4339                 if (!tx_ring)
4340                         prb_shutdown_retire_blk_timer(po, rb_queue);
4341         }
4342
4343         if (pg_vec)
4344                 free_pg_vec(pg_vec, order, req->tp_block_nr);
4345 out:
4346         release_sock(sk);
4347         return err;
4348 }
4349
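/*
 * Illustrative userspace sketch (not part of this file): a tpacket_req
 * that satisfies the checks above, assuming 4 KiB pages. The block
 * size must be page aligned, the frame size a multiple of
 * TPACKET_ALIGNMENT, and tp_frame_nr must equal frames-per-block times
 * tp_block_nr.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	int ver = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */
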
4350 static int packet_mmap(struct file *file, struct socket *sock,
4351                 struct vm_area_struct *vma)
4352 {
4353         struct sock *sk = sock->sk;
4354         struct packet_sock *po = pkt_sk(sk);
4355         unsigned long size, expected_size;
4356         struct packet_ring_buffer *rb;
4357         unsigned long start;
4358         int err = -EINVAL;
4359         int i;
4360
4361         if (vma->vm_pgoff)
4362                 return -EINVAL;
4363
4364         mutex_lock(&po->pg_vec_lock);
4365
4366         expected_size = 0;
4367         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4368                 if (rb->pg_vec) {
4369                         expected_size += rb->pg_vec_len
4370                                                 * rb->pg_vec_pages
4371                                                 * PAGE_SIZE;
4372                 }
4373         }
4374
4375         if (expected_size == 0)
4376                 goto out;
4377
4378         size = vma->vm_end - vma->vm_start;
4379         if (size != expected_size)
4380                 goto out;
4381
4382         start = vma->vm_start;
4383         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4384                 if (rb->pg_vec == NULL)
4385                         continue;
4386
4387                 for (i = 0; i < rb->pg_vec_len; i++) {
4388                         struct page *page;
4389                         void *kaddr = rb->pg_vec[i].buffer;
4390                         int pg_num;
4391
4392                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4393                                 page = pgv_to_page(kaddr);
4394                                 err = vm_insert_page(vma, start, page);
4395                                 if (unlikely(err))
4396                                         goto out;
4397                                 start += PAGE_SIZE;
4398                                 kaddr += PAGE_SIZE;
4399                         }
4400                 }
4401         }
4402
4403         atomic_inc(&po->mapped);
4404         vma->vm_ops = &packet_mmap_ops;
4405         err = 0;
4406
4407 out:
4408         mutex_unlock(&po->pg_vec_lock);
4409         return err;
4410 }
4411
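/*
 * Illustrative userspace sketch (not part of this file): the mapping
 * must start at offset 0 and cover the configured rings exactly to
 * pass the expected_size check above; with only the RX ring from the
 * sketch after packet_set_ring() configured:
 *
 *	size_t sz = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */
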
4412 static const struct proto_ops packet_ops_spkt = {
4413         .family =       PF_PACKET,
4414         .owner =        THIS_MODULE,
4415         .release =      packet_release,
4416         .bind =         packet_bind_spkt,
4417         .connect =      sock_no_connect,
4418         .socketpair =   sock_no_socketpair,
4419         .accept =       sock_no_accept,
4420         .getname =      packet_getname_spkt,
4421         .poll =         datagram_poll,
4422         .ioctl =        packet_ioctl,
4423         .listen =       sock_no_listen,
4424         .shutdown =     sock_no_shutdown,
4425         .setsockopt =   sock_no_setsockopt,
4426         .getsockopt =   sock_no_getsockopt,
4427         .sendmsg =      packet_sendmsg_spkt,
4428         .recvmsg =      packet_recvmsg,
4429         .mmap =         sock_no_mmap,
4430         .sendpage =     sock_no_sendpage,
4431 };
4432
4433 static const struct proto_ops packet_ops = {
4434         .family =       PF_PACKET,
4435         .owner =        THIS_MODULE,
4436         .release =      packet_release,
4437         .bind =         packet_bind,
4438         .connect =      sock_no_connect,
4439         .socketpair =   sock_no_socketpair,
4440         .accept =       sock_no_accept,
4441         .getname =      packet_getname,
4442         .poll =         packet_poll,
4443         .ioctl =        packet_ioctl,
4444         .listen =       sock_no_listen,
4445         .shutdown =     sock_no_shutdown,
4446         .setsockopt =   packet_setsockopt,
4447         .getsockopt =   packet_getsockopt,
4448 #ifdef CONFIG_COMPAT
4449         .compat_setsockopt = compat_packet_setsockopt,
4450 #endif
4451         .sendmsg =      packet_sendmsg,
4452         .recvmsg =      packet_recvmsg,
4453         .mmap =         packet_mmap,
4454         .sendpage =     sock_no_sendpage,
4455 };
4456
4457 static const struct net_proto_family packet_family_ops = {
4458         .family =       PF_PACKET,
4459         .create =       packet_create,
4460         .owner  =       THIS_MODULE,
4461 };
4462
4463 static struct notifier_block packet_netdev_notifier = {
4464         .notifier_call =        packet_notifier,
4465 };
4466
4467 #ifdef CONFIG_PROC_FS
4468
4469 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4470         __acquires(RCU)
4471 {
4472         struct net *net = seq_file_net(seq);
4473
4474         rcu_read_lock();
4475         return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4476 }
4477
4478 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4479 {
4480         struct net *net = seq_file_net(seq);
4481         return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4482 }
4483
4484 static void packet_seq_stop(struct seq_file *seq, void *v)
4485         __releases(RCU)
4486 {
4487         rcu_read_unlock();
4488 }
4489
4490 static int packet_seq_show(struct seq_file *seq, void *v)
4491 {
4492         if (v == SEQ_START_TOKEN)
4493                 seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4494         else {
4495                 struct sock *s = sk_entry(v);
4496                 const struct packet_sock *po = pkt_sk(s);
4497
4498                 seq_printf(seq,
4499                            "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4500                            s,
4501                            atomic_read(&s->sk_refcnt),
4502                            s->sk_type,
4503                            ntohs(po->num),
4504                            po->ifindex,
4505                            po->running,
4506                            atomic_read(&s->sk_rmem_alloc),
4507                            from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4508                            sock_i_ino(s));
4509         }
4510
4511         return 0;
4512 }
4513
4514 static const struct seq_operations packet_seq_ops = {
4515         .start  = packet_seq_start,
4516         .next   = packet_seq_next,
4517         .stop   = packet_seq_stop,
4518         .show   = packet_seq_show,
4519 };
4520
4521 static int packet_seq_open(struct inode *inode, struct file *file)
4522 {
4523         return seq_open_net(inode, file, &packet_seq_ops,
4524                             sizeof(struct seq_net_private));
4525 }
4526
4527 static const struct file_operations packet_seq_fops = {
4528         .owner          = THIS_MODULE,
4529         .open           = packet_seq_open,
4530         .read           = seq_read,
4531         .llseek         = seq_lseek,
4532         .release        = seq_release_net,
4533 };
4534
4535 #endif
4536
4537 static int __net_init packet_net_init(struct net *net)
4538 {
4539         mutex_init(&net->packet.sklist_lock);
4540         INIT_HLIST_HEAD(&net->packet.sklist);
4541
4542         if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4543                 return -ENOMEM;
4544
4545         return 0;
4546 }
4547
4548 static void __net_exit packet_net_exit(struct net *net)
4549 {
4550         remove_proc_entry("packet", net->proc_net);
4551 }
4552
4553 static struct pernet_operations packet_net_ops = {
4554         .init = packet_net_init,
4555         .exit = packet_net_exit,
4556 };
4557
4558
4559 static void __exit packet_exit(void)
4560 {
4561         unregister_netdevice_notifier(&packet_netdev_notifier);
4562         unregister_pernet_subsys(&packet_net_ops);
4563         sock_unregister(PF_PACKET);
4564         proto_unregister(&packet_proto);
4565 }
4566
4567 static int __init packet_init(void)
4568 {
4569         int rc = proto_register(&packet_proto, 0);
4570
4571         if (rc != 0)
4572                 goto out;
4573
4574         sock_register(&packet_family_ops);
4575         register_pernet_subsys(&packet_net_ops);
4576         register_netdevice_notifier(&packet_netdev_notifier);
4577 out:
4578         return rc;
4579 }
4580
4581 module_init(packet_init);
4582 module_exit(packet_exit);
4583 MODULE_LICENSE("GPL");
4584 MODULE_ALIAS_NETPROTO(PF_PACKET);