bpf: fix byte order test in test_verifier
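
In user space, glibc's <endian.h> defines both __LITTLE_ENDIAN and
__BIG_ENDIAN unconditionally (as the constants 1234 and 4321), so a plain
#ifdef __LITTLE_ENDIAN test is true even on big endian hosts; only comparing
__BYTE_ORDER against those macros selects the instruction sequence that
matches the host. A minimal sketch of the pitfall, not part of the patch and
assuming a glibc-style <endian.h>:

    #include <endian.h>
    #include <stdio.h>

    int main(void)
    {
    #ifdef __LITTLE_ENDIAN
            /* Taken with glibc headers on every host: the macro is
             * always defined, regardless of the actual byte order. */
            printf("#ifdef __LITTLE_ENDIAN is true\n");
    #endif
    #if __BYTE_ORDER == __LITTLE_ENDIAN
            printf("host byte order: little endian\n"); /* the correct test */
    #else
            printf("host byte order: big endian\n");
    #endif
            return 0;
    }
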
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 404aec5208128101812f6c9189ed046d1ec6eb24..d3ed7324105e4eeeb281c4c90922c27541b2b58f 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,7 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
                "check skb->hash byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
                "check skb->hash byte load not permitted 3",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
                "check skb->hash half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash)),
 #else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
                "check skb->hash half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -4969,7 +4970,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
                                sizeof(struct test_val), 4),
                        BPF_MOV64_IMM(BPF_REG_4, 0),
-                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -4995,7 +4996,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
                                sizeof(struct test_val) + 1, 4),
                        BPF_MOV64_IMM(BPF_REG_4, 0),
-                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5023,7 +5024,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
                                sizeof(struct test_val) - 20, 4),
                        BPF_MOV64_IMM(BPF_REG_4, 0),
-                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5050,7 +5051,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
                                sizeof(struct test_val) - 19, 4),
                        BPF_MOV64_IMM(BPF_REG_4, 0),
-                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period byte load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period half load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
                "check bpf_perf_event_data->sample_period word load permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
                "check skb->data half load not permitted",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
 #else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
                "check skb->tc_classid half load not permitted for lwt prog",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
                        BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, tc_classid)),
 #else
@@ -5510,6 +5511,504 @@ static struct bpf_test tests[] = {
                .errstr = "invalid bpf_context access",
                .prog_type = BPF_PROG_TYPE_LWT_IN,
        },
+       {
+               "bounds checks mixing signed and unsigned, positive bounds",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, 2),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+                       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R8 invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+                       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R8 invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 5",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 invalid mem access",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 6",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_6, -1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+                       BPF_MOV64_IMM(BPF_REG_5, 0),
+                       BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R4 min value is negative, either use unsigned",
+               .errstr = "R4 min value is negative, either use unsigned",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 7",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 8",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 9",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 10",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 11",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 12",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+                       /* Dead branch. */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 13",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -6),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 14",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, 2),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_7, 1),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 15",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_MOV64_IMM(BPF_REG_8, 2),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+                       BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+               },
+               .fixup_map1 = { 4 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "bounds checks mixing signed and unsigned, variant 16",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_MOV64_IMM(BPF_REG_2, -6),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "subtraction bounds (map value)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -5633,7 +6132,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
        fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
                                     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
-                                    "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
+                                    "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
 
        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;