/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

#include "bpf_jit_32.h"
/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF A accumulator
 * r5	BPF X accumulator
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))
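/*
 * The 16 BPF scratch words M[k] live on the stack: word k sits at
 * SP + 4 * k once build_prologue() has reserved the space.
 */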
#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
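/*
 * ctx->seen is filled in by the first (sizing) pass: the low
 * BPF_MEMWORDS bits record which scratch words the filter touches,
 * the bits above record uses of X, of helper calls, of the skb
 * pointer and of the packet data, so the prologue and epilogue only
 * set up and save what is actually needed.
 */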
#define FLAG_NEED_X_RESET	(1 << 0)
struct jit_ctx {
	const struct sk_filter *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};
int bpf_jit_enable __read_mostly;
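/*
 * Slowpath helpers, called from JITed code when the data is not in the
 * linear part of the skb.  Per the EABI, the u64 comes back in r0/r1:
 * the skb_copy_bits() error code lands in the upper word (r1) and the
 * value, converted to host byte order, in the lower word (r0).
 */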
static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}
static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}
static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	/* store only on the real pass; the sizing pass just counts */
	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst | (cond << 28);

	ctx->idx++;
}
/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}
static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == BPF_S_RET_A))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}
static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}
static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_ANC_QUEUE:
		return true;
	default:
		return false;
	}
}
static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	/* the return is done via a POP into PC or via BX LR below */
	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (ctx->seen & SEEN_CALL)
		reg_set |= 1 << ARM_PC;
	emit(ARM_POP(reg_set), ctx);
#endif

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
}
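/*
 * An ARM "imm12" operand is an 8-bit value rotated right by twice the
 * 4-bit rotation field.  imm8m() returns that encoding when it exists
 * (e.g. 0x00ff0000 encodes as 0x8ff: 0xff rotated right by 16) and a
 * negative value when the constant cannot be represented.
 */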
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
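/*
 * Pre-v7 cores have no movw/movt, so arbitrary 32-bit constants are
 * kept in a literal pool placed right after the epilogue and loaded
 * with a PC-relative LDR.  The first pass merely counts the constants
 * (duplicates included); the second pass deduplicates them, writes
 * them into the pool and returns the PC-relative offset.
 */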
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}
static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}
static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * mask out the bits left in r_dst[23:16] by the first shift;
	 * 0x8ff is the imm12 encoding of 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}
#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */
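/*
 * In ARM mode the PC reads as the address of the current instruction
 * plus 8, and the B instruction encodes a signed offset in 32-bit
 * words; both facts are baked into the calculation below.
 */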
/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
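/*
 * Emit op(r1, r2, #imm_val), going through the scratch register when
 * the constant is not encodable as an imm12.  Note that the macro
 * relies on an "int imm12" variable being in scope at the call site
 * (build_body() declares one).
 */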
#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)
static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}
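/*
 * Indirect call.  BLX only exists from ARMv5 on: older cores set up
 * the return address by hand and then branch, using BX when the CPU
 * supports Thumb interworking and a plain move to PC otherwise.
 */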
static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}
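/*
 * Unsigned divide: emit the UDIV instruction when the ARMv7 core
 * advertises hardware integer division (HWCAP_IDIVA), and fall back
 * to calling jit_udiv() through r3 otherwise.
 */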
static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif

	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}
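/*
 * Track the first read of X: if X is read before ever being written,
 * the prologue must zero it (FLAG_NEED_X_RESET), matching the
 * interpreter, which starts out with X == 0.
 */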
static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}
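/*
 * Generate the code for the filter body.  This runs twice: first with
 * ctx->target == NULL to record the instruction offsets and the usage
 * flags, then a second time to actually store the instructions.
 */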
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct sk_filter *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (inst->code) {
		case BPF_S_LD_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_S_LD_W_LEN:
			/* A = skb->len */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_S_LD_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_LD_W_ABS:
			load_order = 2;
			goto load;
		case BPF_S_LD_H_ABS:
			load_order = 1;
			goto load;
		case BPF_S_LD_B_ABS:
			load_order = 0;
load:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -1;
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			/* fast path: the data is in the linear area */
			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_HS;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_S_LD_W_IND:
			load_order = 2;
			goto load_ind;
		case BPF_S_LD_H_IND:
			load_order = 1;
			goto load_ind;
		case BPF_S_LD_B_IND:
			load_order = 0;
load_ind:
			/* X is read here: make sure it gets initialised */
			update_on_xread(ctx);
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_S_LDX_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_S_LDX_W_LEN:
			/* X = skb->len */
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_S_LDX_MEM:
			/* X = scratch[k] */
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_LDX_B_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * branch over the slowpath to the AND/LSL pair that
			 * both paths share; the slowpath length varies, as
			 * emit_mov_i() and emit_blx_r() each expand to one
			 * or more instructions, so aim at the next BPF
			 * instruction's offset minus the two shared ones.
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in r1 (r_off) */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_S_ST:
			/* scratch[k] = A */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_STX:
			/* scratch[k] = X */
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_S_ALU_ADD_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_ADD_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_SUB_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_SUB_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_MUL_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_S_ALU_MUL_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_DIV_K:
			/*
			 * current k == reciprocal_value(userspace k), so
			 * A /= K becomes A = ((u64)A * k) >> 32
			 */
			emit_mov_i(r_scratch, k, ctx);
			/* A = top 32 bits of the product */
			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
			break;
		case BPF_S_ALU_DIV_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_S_ALU_OR_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_OR_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_XOR_K:
			/* A ^= K */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_AND_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_S_ALU_AND_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_LSH_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_S_ALU_LSH_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_RSH_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_S_ALU_RSH_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_S_ALU_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_S_JMP_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_S_JMP_JEQ_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_S_JMP_JGT_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_S_JMP_JGE_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			/* an ARM condition is inverted by flipping its LSB */
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
		case BPF_S_JMP_JEQ_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_S_JMP_JGT_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_S_JMP_JGE_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_S_JMP_JSET_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_S_JMP_JSET_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_S_RET_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_S_RET_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_S_MISC_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_S_MISC_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_S_ANC_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_S_ANC_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_S_ANC_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_S_ANC_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_S_ANC_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			off = offsetof(struct sk_buff, rxhash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (inst->code == BPF_S_ANC_VLAN_TAG)
				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
			else
				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
			break;
		case BPF_S_ANC_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		default:
			return -1;
		}
	}
	/* record the end-of-body offset, only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
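/*
 * Compilation works in two passes: a fake pass over the body collects
 * the usage flags and per-instruction offsets, the prologue and
 * epilogue are built once to learn their sizes, the image is then
 * allocated and everything is emitted for real.
 */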
void bpf_jit_compile(struct sk_filter *fp)
{
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(max(sizeof(struct work_struct),
				      alloc_size));
	if (unlikely(ctx.target == NULL))
		goto out;

	ctx.idx = 0;
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		print_hex_dump(KERN_INFO, "BPF JIT code: ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
			       alloc_size, false);

	fp->bpf_func = (void *)ctx.target;
out:
	kfree(ctx.offsets);
}
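/*
 * module_free() needs process context, so freeing is deferred to a
 * workqueue; the work_struct is stored in the image itself, which is
 * why bpf_jit_compile() allocated at least sizeof(struct work_struct)
 * bytes.
 */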
static void bpf_jit_free_worker(struct work_struct *work)
{
	module_free(NULL, work);
}
void bpf_jit_free(struct sk_filter *fp)
{
	struct work_struct *work;

	if (fp->bpf_func != sk_run_filter) {
		work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, bpf_jit_free_worker);
		schedule_work(work);
	}
}