From: Andrey Konovalov
To: Andrey Ryabinin, Alexander Potapenko, Dmitry Vyukov, Jonathan Corbet,
    Catalin Marinas, Will Deacon, Marc Zyngier, Christopher Li,
    Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
    Andrew Morton, Masahiro Yamada, Michal Marek,
    "GitAuthor : Andrey Konovalov", Mark Rutland, Ard Biesheuvel,
    Yury Norov, Nick Desaulniers, Suzuki K Poulose, Kristina Martsenko,
    Punit Agrawal, Dave Martin, Michael Weiser, James Morse,
    Julien Thierry, Steve Capper, Tyler Baicar, "Eric W. Biederman",
Biederman" , Thomas Gleixner , Ingo Molnar , Paul Lawrence , Greg Kroah-Hartman , David Woodhouse , Sandipan Das , Kees Cook , Herbert Xu , Geert Uytterhoeven , Josh Poimboeuf , Arnd Bergmann , kasan-dev@googlegroups.com, linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org, kvmarm@lists.cs.columbia.edu, linux-sparse@vger.kernel.org, linux-mm@kvack.org, linux-kbuild@vger.kernel.org Cc: Kostya Serebryany , Evgeniy Stepanov , Lee Smith , Ramana Radhakrishnan , Jacob Bramley , Ruben Ayrapetyan , Kees Cook , Jann Horn , Mark Brand Subject: [RFC PATCH v3 13/15] khwasan, arm64: add brk handler for inline instrumentation Date: Fri, 20 Apr 2018 16:46:51 +0200 Message-Id: X-Mailer: git-send-email 2.17.0.484.g0c8726318c-goog In-Reply-To: References: In-Reply-To: References: X-getmail-retrieved-from-mailbox: INBOX X-GMAIL-THRID: =?utf-8?q?1598276917662411482?= X-GMAIL-MSGID: =?utf-8?q?1598276917662411482?= X-Mailing-List: linux-kernel@vger.kernel.org List-ID: KHWASAN inline instrumentation mode (which embeds checks of shadow memory into the generated code, instead of inserting a callback) generates a brk instruction when a tag mismatch is detected. This commit add a KHWASAN brk handler, that decodes the immediate value passed to the brk instructions (to extract information about the memory access that triggered the mismatch), reads the register values (x0 contains the guilty address) and reports the bug. Signed-off-by: Andrey Konovalov --- arch/arm64/include/asm/brk-imm.h | 2 + arch/arm64/kernel/traps.c | 69 +++++++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index ed693c5bcec0..e4a7013321dc 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h @@ -16,10 +16,12 @@ * 0x400: for dynamic BRK instruction * 0x401: for compile time BRK instruction * 0x800: kernel-mode BUG() and WARN() traps + * 0x9xx: KHWASAN trap (allowed values 0x900 - 0x9ff) */ #define FAULT_BRK_IMM 0x100 #define KGDB_DYN_DBG_BRK_IMM 0x400 #define KGDB_COMPILED_DBG_BRK_IMM 0x401 #define BUG_BRK_IMM 0x800 +#define KHWASAN_BRK_IMM 0x900 #endif diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index ba964da31a25..b25effc7972a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -269,10 +270,14 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, } } -void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) +void __arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) { regs->pc += size; +} +void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) +{ + __arm64_skip_faulting_instruction(regs, size); /* * If we were single stepping, we want to get the step exception after * we return from the trap. 
@@ -789,7 +794,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
 	}
 
 	/* If thread survives, skip over the BUG instruction and continue: */
-	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+	__arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 	return DBG_HOOK_HANDLED;
 }
 
@@ -799,6 +804,59 @@ static struct break_hook bug_break_hook = {
 	.fn = bug_handler,
 };
 
+#ifdef CONFIG_KASAN_HW
+
+#define KHWASAN_ESR_RECOVER	0x20
+#define KHWASAN_ESR_WRITE	0x10
+#define KHWASAN_ESR_SIZE_MASK	0x0f
+#define KHWASAN_ESR_SIZE(esr)	(1 << ((esr) & KHWASAN_ESR_SIZE_MASK))
+
+static int khwasan_handler(struct pt_regs *regs, unsigned int esr)
+{
+	bool recover = esr & KHWASAN_ESR_RECOVER;
+	bool write = esr & KHWASAN_ESR_WRITE;
+	size_t size = KHWASAN_ESR_SIZE(esr);
+	u64 addr = regs->regs[0];
+	u64 pc = regs->pc;
+
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
+	kasan_report(addr, size, write, pc);
+
+	/*
+	 * The instrumentation allows control over whether we can proceed
+	 * after a crash is detected. This is done by passing the -recover
+	 * flag to the compiler. Disabling recovery allows the compiler to
+	 * generate more compact code.
+	 *
+	 * Unfortunately, disabling recovery doesn't work for the kernel
+	 * right now. KHWASAN reporting is disabled in some contexts (for
+	 * example when the allocator accesses slab object metadata; the
+	 * same is true for KASAN; this is controlled by
+	 * current->kasan_depth). All these accesses are detected by the
+	 * tool, even though the reports for them are not printed.
+	 *
+	 * This is something that might be fixed at some point in the future.
+	 */
+	if (!recover)
+		die("Oops - KHWASAN", regs, 0);
+
+	/* If thread survives, skip over the brk instruction and continue: */
+	__arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+	return DBG_HOOK_HANDLED;
+}
+
+#define KHWASAN_ESR_VAL		(0xf2000000 | KHWASAN_BRK_IMM)
+#define KHWASAN_ESR_MASK	0xffffff00
+
+static struct break_hook khwasan_break_hook = {
+	.esr_val = KHWASAN_ESR_VAL,
+	.esr_mask = KHWASAN_ESR_MASK,
+	.fn = khwasan_handler,
+};
+#endif
+
 /*
  * Initial handler for AArch64 BRK exceptions
  * This handler only used until debug_traps_init().
@@ -806,6 +864,10 @@ static struct break_hook bug_break_hook = {
 int __init early_brk64(unsigned long addr, unsigned int esr,
 		       struct pt_regs *regs)
 {
+#ifdef CONFIG_KASAN_HW
+	if ((esr & KHWASAN_ESR_MASK) == KHWASAN_ESR_VAL)
+		return khwasan_handler(regs, esr) != DBG_HOOK_HANDLED;
+#endif
 	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
 }
 
@@ -813,4 +875,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
 void __init trap_init(void)
 {
 	register_break_hook(&bug_break_hook);
+#ifdef CONFIG_KASAN_HW
+	register_break_hook(&khwasan_break_hook);
+#endif
 }
-- 
2.17.0.484.g0c8726318c-goog
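
For reference, the low byte of the brk immediate is what carries the access
details that khwasan_handler() decodes. The stand-alone sketch below reuses
the KHWASAN_ESR_* bit layout from the patch to show the decoding outside the
kernel; the sample ESR value (a recoverable 8-byte write) and the test
program around it are illustrative only, not something the compiler is
guaranteed to emit.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the KHWASAN_ESR_* macros added by this patch. */
#define KHWASAN_ESR_RECOVER	0x20
#define KHWASAN_ESR_WRITE	0x10
#define KHWASAN_ESR_SIZE_MASK	0x0f
#define KHWASAN_ESR_SIZE(esr)	(1 << ((esr) & KHWASAN_ESR_SIZE_MASK))

int main(void)
{
	/*
	 * Hypothetical ESR for a KHWASAN brk: 0xf2000000 is the fixed part
	 * matched via KHWASAN_ESR_VAL/KHWASAN_ESR_MASK, 0x900 is
	 * KHWASAN_BRK_IMM, and the low bits say "recoverable 8-byte write"
	 * (the size is encoded as log2, so 3 means 1 << 3 = 8 bytes).
	 */
	uint32_t esr = 0xf2000000 | 0x900 | KHWASAN_ESR_RECOVER |
		       KHWASAN_ESR_WRITE | 3;

	bool recover = esr & KHWASAN_ESR_RECOVER;
	bool write = esr & KHWASAN_ESR_WRITE;
	size_t size = KHWASAN_ESR_SIZE(esr);

	/* Prints: recover=1 write=1 size=8 */
	printf("recover=%d write=%d size=%zu\n", recover, write, size);
	return 0;
}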
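
A related point is why a single break_hook covers the whole 0x900 - 0x9ff
window reserved in brk-imm.h: KHWASAN_ESR_MASK (0xffffff00) clears the low
byte before the comparison, exactly as early_brk64() does in this patch, so
every immediate the compiler picks in that range is routed to
khwasan_handler(), while BUG() traps at 0x800 are not. A minimal sketch of
that masked comparison, with the helper name invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Immediates from brk-imm.h and the ESR value/mask added by this patch. */
#define BUG_BRK_IMM		0x800
#define KHWASAN_BRK_IMM		0x900
#define KHWASAN_ESR_VAL		(0xf2000000u | KHWASAN_BRK_IMM)
#define KHWASAN_ESR_MASK	0xffffff00u

/* Hypothetical helper mirroring the test early_brk64() performs. */
static int khwasan_hook_matches(uint32_t esr)
{
	return (esr & KHWASAN_ESR_MASK) == KHWASAN_ESR_VAL;
}

int main(void)
{
	printf("%d\n", khwasan_hook_matches(0xf2000000u | 0x900));	/* 1 */
	printf("%d\n", khwasan_hook_matches(0xf2000000u | 0x9ff));	/* 1 */
	printf("%d\n", khwasan_hook_matches(0xf2000000u | BUG_BRK_IMM)); /* 0 */
	return 0;
}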