net/openvswitch/actions.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);

struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

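/* Deferred actions are queued on a small per-CPU FIFO while an action
 * list is executing and are drained by process_deferred_actions() once
 * the outermost ovs_execute_actions() call finishes.  exec_actions_level
 * tracks that nesting depth so only the outermost level drains the
 * queue, which bounds recursion from recirc and sample actions.
 */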
static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}

/* Return the queued entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *attr)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = attr;
                da->pkt_key = *key;
        }

        return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !!key->eth.type;
}

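/* Push an MPLS label stack entry onto 'skb': move the L2 header earlier
 * in the buffer by MPLS_HLEN bytes, write the new label stack entry in
 * the gap between it and the payload, then switch both the ethernet
 * header's h_proto and skb->protocol to the MPLS ethertype.
 */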
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
{
        __be32 *new_mpls_lse;
        struct ethhdr *hdr;

        /* The networking stack does not allow simultaneous tunnel and
         * MPLS GSO.
         */
        if (skb->encapsulation)
                return -ENOTSUPP;

        if (skb_cow_head(skb, MPLS_HLEN) < 0)
                return -ENOMEM;

        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);
        skb_reset_mac_header(skb);

        new_mpls_lse = (__be32 *)skb_mpls_header(skb);
        *new_mpls_lse = mpls->mpls_lse;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
                                                             MPLS_HLEN, 0));

        hdr = eth_hdr(skb);
        hdr->h_proto = mpls->mpls_ethertype;

        skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        struct ethhdr *hdr;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);

        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);

        /* skb_mpls_header() is used to locate the ethertype
         * field correctly in the presence of VLAN tags.
         */
        hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
        hdr->h_proto = ethertype;
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be32 *mpls_lse)
{
        __be32 *stack;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        stack = (__be32 *)skb_mpls_header(skb);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be32 diff[] = { ~(*stack), *mpls_lse };
                skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                          ~skb->csum);
        }

        *stack = *mpls_lse;
        key->mpls.top_lse = *mpls_lse;
        return 0;
}

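/* If a VLAN tag is already present, pushing or popping another one
 * leaves headers behind that the cached flow key does not describe, so
 * the key is invalidated and must be recomputed (see is_flow_key_valid()
 * and ovs_flow_key_update()) before it is relied on again.
 */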
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (vlan_tx_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = 0;
        return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (vlan_tx_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = vlan->vlan_tci;
        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
                        const struct ovs_key_ethernet *eth_key)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
        ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(key->eth.src, eth_key->eth_src);
        ether_addr_copy(key->eth.dst, eth_key->eth_dst);
        return 0;
}

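/* Rewriting an IPv4 address also has to update the TCP or UDP checksum,
 * since the addresses are part of the L4 pseudo-header.  A UDP checksum
 * of zero means "no checksum" on IPv4, so it is left alone unless the
 * hardware still has to fill it in (CHECKSUM_PARTIAL), and a recomputed
 * value of zero is folded to CSUM_MANGLED_0.
 */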
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         *addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, 1);
        }
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

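/* The IPv6 traffic class straddles a byte boundary: its high four bits
 * live in ipv6hdr::priority and its low four bits in the top of
 * flow_lbl[0], while the 20-bit flow label fills the rest of flow_lbl[].
 */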
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
        nh->priority = tc >> 4;
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
        nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
        nh->flow_lbl[2] = fl & 0x000000FF;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_key_ipv4 *ipv4_key)
{
        struct iphdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        if (ipv4_key->ipv4_src != nh->saddr) {
                set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
                key->ipv4.addr.src = ipv4_key->ipv4_src;
        }

        if (ipv4_key->ipv4_dst != nh->daddr) {
                set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
                key->ipv4.addr.dst = ipv4_key->ipv4_dst;
        }

        if (ipv4_key->ipv4_tos != nh->tos) {
                ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
                key->ip.tos = nh->tos;
        }

        if (ipv4_key->ipv4_ttl != nh->ttl) {
                set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
                key->ip.ttl = ipv4_key->ipv4_ttl;
        }

        return 0;
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_key_ipv6 *ipv6_key)
{
        struct ipv6hdr *nh;
        int err;
        __be32 *saddr;
        __be32 *daddr;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        saddr = (__be32 *)&nh->saddr;
        daddr = (__be32 *)&nh->daddr;

        if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
                set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
                              ipv6_key->ipv6_src, true);
                memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
                       sizeof(ipv6_key->ipv6_src));
        }

        if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;

                /* With a routing header present, the L4 pseudo-header
                 * checksum covers the final destination rather than the
                 * address in the fixed header, so do not update it.
                 */
                if (ipv6_ext_hdr(nh->nexthdr))
                        recalc_csum = ipv6_find_hdr(skb, &offset,
                                                    NEXTHDR_ROUTING, NULL,
                                                    &flags) != NEXTHDR_ROUTING;

                set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
                              ipv6_key->ipv6_dst, recalc_csum);
                memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
                       sizeof(ipv6_key->ipv6_dst));
        }

        set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
        key->ip.tos = ipv6_get_dsfield(nh);

        set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
        key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);

        nh->hop_limit = ipv6_key->ipv6_hlimit;
        key->ip.ttl = ipv6_key->ipv6_hlimit;
        return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
        skb_clear_hash(skb);
}

static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
        struct udphdr *uh = udp_hdr(skb);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                set_tp_port(skb, port, new_port, &uh->check);

                if (!uh->check)
                        uh->check = CSUM_MANGLED_0;
        } else {
                *port = new_port;
                skb_clear_hash(skb);
        }
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
                   const struct ovs_key_udp *udp_port_key)
{
        struct udphdr *uh;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        if (udp_port_key->udp_src != uh->source) {
                set_udp_port(skb, &uh->source, udp_port_key->udp_src);
                key->tp.src = udp_port_key->udp_src;
        }

        if (udp_port_key->udp_dst != uh->dest) {
                set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
                key->tp.dst = udp_port_key->udp_dst;
        }

        return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
                   const struct ovs_key_tcp *tcp_port_key)
{
        struct tcphdr *th;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        if (tcp_port_key->tcp_src != th->source) {
                set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
                key->tp.src = tcp_port_key->tcp_src;
        }

        if (tcp_port_key->tcp_dst != th->dest) {
                set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
                key->tp.dst = tcp_port_key->tcp_dst;
        }

        return 0;
}

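/* SCTP uses a CRC32c checksum, which cannot be updated incrementally the
 * way the ones-complement Internet checksum can.  The checksum is instead
 * recomputed before and after the rewrite, and the difference is folded
 * into the old value so that a packet that arrived with a corrupt
 * checksum also leaves with one.
 */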
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_key_sctp *sctp_port_key)
{
        struct sctphdr *sh;
        int err;
        unsigned int sctphoff = skb_transport_offset(skb);

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        if (sctp_port_key->sctp_src != sh->source ||
            sctp_port_key->sctp_dst != sh->dest) {
                __le32 old_correct_csum, new_csum, old_csum;

                old_csum = sh->checksum;
                old_correct_csum = sctp_compute_cksum(skb, sctphoff);

                sh->source = sctp_port_key->sctp_src;
                sh->dest = sctp_port_key->sctp_dst;

                new_csum = sctp_compute_cksum(skb, sctphoff);

                /* Carry any checksum errors through. */
                sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

                skb_clear_hash(skb);
                key->tp.src = sctp_port_key->sctp_src;
                key->tp.dst = sctp_port_key->sctp_dst;
        }

        return 0;
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport))
                ovs_vport_send(vport, skb);
        else
                kfree_skb(skb);
}

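/* Send 'skb' to userspace as an OVS_PACKET_CMD_ACTION upcall, filling in
 * the optional userdata, netlink port id and egress tunnel info from the
 * nested OVS_USERSPACE_ATTR_* attributes.
 */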
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr)
{
        struct ovs_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.userdata = NULL;
        upcall.portid = 0;
        upcall.egress_tun_info = NULL;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get egress tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                err = ovs_vport_get_egress_tun_info(vport, skb,
                                                                    &info);
                                if (!err)
                                        upcall.egress_tun_info = &info;
                        }
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (prandom_u32() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        rem = nla_len(acts_list);
        a = nla_data(acts_list);

        /* The actions list is empty; do nothing. */
        if (unlikely(!rem))
                return 0;

        /* The only known use of the sample action is to have a single
         * user-space action, so treat that as a special case:
         * output_userspace() clones the skb it hands to user space, and
         * the original 'skb' will be consumed by our caller.
         */
        if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                   nla_is_last(a, rem)))
                return output_userspace(dp, skb, key, a);

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                /* Skip the sample action when out of memory. */
                return 0;

        if (!add_deferred_actions(skb, key, a)) {
                if (net_ratelimit())
                        pr_warn("%s: deferred actions limit reached, dropping sample action\n",
                                ovs_dp_name(dp));

                kfree_skb(skb);
        }
        return 0;
}

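/* Compute a flow hash for the OVS_ACTION_ATTR_HASH action and store it
 * in key->ovs_flow_hash.  Zero is reserved to mean "no hash computed",
 * so a zero result is forced to 0x1.
 */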
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1;

        key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
                              const struct nlattr *nested_attr)
{
        int err = 0;

        switch (nla_type(nested_attr)) {
        case OVS_KEY_ATTR_PRIORITY:
                skb->priority = nla_get_u32(nested_attr);
                key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                skb->mark = nla_get_u32(nested_attr);
                key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, key, nla_data(nested_attr));
                break;
        }

        return err;
}

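/* Queue 'skb' for another pass through the datapath with the given
 * recirculation id.  The packet is not re-processed here; it is picked
 * up by process_deferred_actions() after the current action list
 * finishes.
 */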
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, int rem)
{
        struct deferred_action *da;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        if (!nla_is_last(a, rem)) {
                /* The recirc action is not the last action of the
                 * action list, so we need to clone the skb.
                 */
                skb = skb_clone(skb, GFP_ATOMIC);

                /* Skip the recirc action when out of memory, but
                 * continue on with the rest of the action list.
                 */
                if (!skb)
                        return 0;
        }

        da = add_deferred_actions(skb, key, NULL);
        if (da) {
                da->pkt_key.recirc_id = nla_get_u32(a);
        } else {
                kfree_skb(skb);

                if (net_ratelimit())
                        pr_warn("%s: deferred action limit reached, drop recirc action\n",
                                ovs_dp_name(dp));
        }

        return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff would be wasteful.  The following code
         * is slightly obscure just to avoid that: an output port is recorded
         * in 'prev_port' and only cloned once another action follows.
         */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (unlikely(prev_port != -1)) {
                        struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

                        if (out_skb)
                                do_output(dp, out_skb, prev_port);

                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a);
                        break;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;

                case OVS_ACTION_ATTR_RECIRC:
                        err = execute_recirc(dp, skb, key, a, rem);
                        if (nla_is_last(a, rem)) {
                                /* If this is the last action, the skb
                                 * has been consumed or freed; return
                                 * immediately.
                                 */
                                return err;
                        }
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1)
                do_output(dp, skb, prev_port);
        else
                consume_skb(skb);

        return 0;
}

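/* Drain the per-CPU deferred action FIFO.  Entries with an action list
 * are run through do_execute_actions(); entries without one were queued
 * by execute_recirc() and re-enter the datapath as fresh packets.
 */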
static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;

                if (actions)
                        do_execute_actions(dp, skb, key, actions,
                                           nla_len(actions));
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet. */
        action_fifo_init(fifo);
}

/* Execute the actions of a flow against 'skb', then run any actions
 * that were deferred along the way.  Only the outermost invocation
 * (nesting level zero) drains the deferred action FIFO.
 */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int level = this_cpu_read(exec_actions_level);
        int err;

        this_cpu_inc(exec_actions_level);
        OVS_CB(skb)->egress_tun_info = NULL;
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (!level)
                process_deferred_actions(dp);

        this_cpu_dec(exec_actions_level);
        return err;
}

int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
}