/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];
static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
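/* Encoding example (illustrative): EMIT3(0x48, 0x89, 0xE5) packs its three
 * bytes into one u32 in little-endian order, so emit_code() writes 48 89 e5
 * to the image, i.e. 'mov rbp,rsp'. The _off32 variants append a raw
 * little-endian 32-bit immediate or displacement after the opcode bytes.
 */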
static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}
/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do { \
		if (DST != SRC) \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}
/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JAE 0x73
#define X86_JE	0x74
#define X86_JNE 0x75
#define X86_JA	0x77
#define X86_JGE 0x7D
#define X86_JG	0x7F
static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)
/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};
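/* Example: BPF_REG_8 lives in x64 r14, but reg2hex[] only stores the low
 * three ModRM bits (6, same as rsi); the fourth bit that distinguishes
 * r14 from rsi is supplied separately as a REX prefix bit by the
 * add_1mod()/add_2mod() helpers below.
 */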
/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static inline bool is_ereg(u32 reg)
{
	if (reg == BPF_REG_5 || reg == AUX_REG ||
	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
		return true;
	else
		return false;
}
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static inline u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1; /* REX.B */
	return byte;
}

static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1; /* REX.B for the r/m register */
	if (is_ereg(r2))
		byte |= 4; /* REX.R for the reg register */
	return byte;
}
/* encode 'dst_reg' register into x64 opcode 'byte' */
static inline u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
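/* Putting the helpers together (illustrative): encoding 'add r13, r14'
 * (dst_reg = BPF_REG_7, src_reg = BPF_REG_8):
 *   add_2mod(0x48, dst, src) -> 0x4D  (REX.W + REX.B for r13 + REX.R for r14)
 *   opcode                   -> 0x01  (add r/m64, r64)
 *   add_2reg(0xC0, dst, src) -> 0xF5  (0xC0 | 5 | 6 << 3, register-direct)
 * giving the three bytes 4d 01 f5.
 */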
static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	unsigned int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};
/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i;
	int proglen = 0;
	u8 *prog = temp;
	int stacksize = MAX_BPF_STACK +
		32 /* space for rbx, r13, r14, r15 */ +
		8 /* space for skb_copy_bits() buffer */;
	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, stacksize */
	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);

	/* all classic BPF filters use R6(rbx) save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
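	/* Frame layout below rbp after this prologue (illustrative):
	 *   [rbp - MAX_BPF_STACK, rbp)    BPF stack, addressed via BPF_REG_FP
	 *   8 bytes below that            scratch for skb_copy_bits()
	 *   [rbp - stacksize, +32 bytes)  spill slots for rbx, r13, r14, r15
	 */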
	if (seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			/* sub %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}
	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
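			/* Example: BPF_ALU64 | BPF_ADD | BPF_X with
			 * dst_reg = BPF_REG_1 (rdi), src_reg = BPF_REG_2 (rsi)
			 * emits 48 01 f7, i.e. 'add rdi,rsi'; the 32-bit
			 * BPF_ALU form drops REX.W and emits 01 f7
			 * ('add edi,esi').
			 */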
			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;
			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
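			/* Example: BPF_ALU64 | BPF_ADD | BPF_K with
			 * dst_reg = BPF_REG_0, imm = 5 fits in imm8 and emits
			 * 48 83 c0 05 ('add rax,0x5'); imm = 1000 needs the
			 * imm32 form 48 81 c0 e8 03 00 00 00.
			 */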
		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;
		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;
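			/* Example: the two-insn pseudo instruction
			 * BPF_LD_IMM64 r0, 0x1122334455667788 emits
			 * 48 b8 88 77 66 55 44 33 22 11 ('movabs rax,imm64'):
			 * insn[0].imm supplies the low 32 bits,
			 * insn[1].imm the high 32 bits.
			 */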
			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
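			/* Emitted sequence for BPF_ALU64 | BPF_DIV | BPF_X,
			 * r1 /= r2 (illustrative):
			 *   push rax; push rdx; mov r11,rsi; mov rax,rdi;
			 *   xor edx,edx; cmp r11,0; jne .+9;
			 *   pop rdx; pop rax; xor eax,eax; jmp cleanup_addr;
			 *   div r11; mov r11,rax; pop rdx; pop rax; mov rdi,r11
			 * x86 div implicitly uses rdx:rax, hence the shuffling.
			 */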
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;
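			/* Example: BPF_ALU64 | BPF_LSH | BPF_K with
			 * dst_reg = BPF_REG_0, imm = 3 emits 48 c1 e0 03
			 * ('shl rax,0x3'); 0xC1 is the shift-group opcode and
			 * the /4, /5, /7 ModRM extensions select shl, shr, sar.
			 */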
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;
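			/* The push/mov/pop dance above exists because x86
			 * variable-count shifts only take the count in %cl:
			 * src_reg is moved into rcx (BPF_REG_4) for the
			 * duration, and a dst_reg already living in rcx is
			 * shifted in r11 instead.
			 */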
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			break;
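			/* Example byte sequences for dst_reg = BPF_REG_0 (rax):
			 * imm 16 -> 66 c1 c8 08 ('ror ax,8'),
			 * imm 32 -> 0f c8 ('bswap eax'),
			 * imm 64 -> 48 0f c8 ('bswap rax').
			 * FROM_LE is a nop here since x86 is little-endian.
			 */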
			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;
			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);

stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);

ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;
			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);

xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
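			/* All the st/stx/ldx/xadd paths share this ModRM
			 * addressing pattern: an offset that fits in s8 uses
			 * mod=01 (0x40 base, one disp8 byte), anything larger
			 * uses mod=10 (0x80 base) with a disp32. Example:
			 * 'mov [rdi+8], esi' is 89 77 08, while offset 0x1000
			 * becomes 89 b7 00 10 00 00.
			 */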
			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;
			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
			/* LD_ABS: r0 = *(u32 *)(skb->data + imm32) */
		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;
		case BPF_JMP | BPF_EXIT:
			if (i != insn_cnt - 1) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;
		default:
			/* By design x64 JIT should support all BPF instructions
			 * This error will be seen if new instruction was added
			 * to interpreter, but not to JIT
			 * or if there is junk in bpf_prog
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
void bpf_jit_compile(struct bpf_prog *prog)
{
}
void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	for (pass = 0; pass < 10; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = true;
	}
out:
	kfree(addrs);
}
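/* Convergence note (illustrative): pass 1 starts from the pessimistic
 * 64-bytes-per-insn estimate and each pass re-runs do_jit() with the tighter
 * addrs[] from the previous one, so jump offsets (and thus proglen) can only
 * shrink. Once two consecutive passes agree, the image is allocated and the
 * final pass emits into it.
 */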
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}