LKML Archive on lore.kernel.org
* [PATCH v2 0/3] Bitops instrumentation for KASAN
@ 2019-05-29 14:14 Marco Elver
  2019-05-29 14:14 ` [PATCH v2 1/3] lib/test_kasan: Add bitops tests Marco Elver
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Marco Elver @ 2019-05-29 14:14 UTC (permalink / raw)
  To: peterz, aryabinin, dvyukov, glider, andreyknvl, mark.rutland
  Cc: corbet, tglx, mingo, bp, hpa, x86, arnd, jpoimboe, linux-doc,
	linux-kernel, linux-arch, kasan-dev, Marco Elver

The previous version of this patch series and discussion can be found
here: https://lkml.org/lkml/2019/5/28/769

The most significant change is that the instrumented access size now
covers the entire word containing the accessed bit.
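
As a concrete illustration, each instrumented wrapper in patch 3/3 now
checks the whole word containing the bit before deferring to the arch
implementation; minimal sketch, taken from the new header:

	static inline void set_bit(long nr, volatile unsigned long *addr)
	{
		/* Check the whole word (sizeof(long)) that bit nr lives in. */
		kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
		arch_set_bit(nr, addr);
	}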

Marco Elver (3):
  lib/test_kasan: Add bitops tests
  x86: Move CPU feature test out of uaccess region
  asm-generic, x86: Add bitops instrumentation for KASAN

 Documentation/core-api/kernel-api.rst     |   2 +-
 arch/x86/ia32/ia32_signal.c               |   9 +-
 arch/x86/include/asm/bitops.h             | 210 ++++----------
 include/asm-generic/bitops-instrumented.h | 317 ++++++++++++++++++++++
 lib/test_kasan.c                          |  75 ++++-
 5 files changed, 450 insertions(+), 163 deletions(-)
 create mode 100644 include/asm-generic/bitops-instrumented.h

-- 
2.22.0.rc1.257.g3120a18244-goog



* [PATCH v2 1/3] lib/test_kasan: Add bitops tests
  2019-05-29 14:14 [PATCH v2 0/3] Bitops instrumentation for KASAN Marco Elver
@ 2019-05-29 14:14 ` Marco Elver
  2019-05-29 15:15   ` Mark Rutland
  2019-05-29 14:15 ` [PATCH 2/3] x86: Move CPU feature test out of uaccess region Marco Elver
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 12+ messages in thread
From: Marco Elver @ 2019-05-29 14:14 UTC (permalink / raw)
  To: peterz, aryabinin, dvyukov, glider, andreyknvl, mark.rutland
  Cc: corbet, tglx, mingo, bp, hpa, x86, arnd, jpoimboe, linux-doc,
	linux-kernel, linux-arch, kasan-dev, Marco Elver

This adds bitops tests to the test_kasan module. In a follow-up patch,
support for bitops instrumentation will be added.

Signed-off-by: Marco Elver <elver@google.com>
---
Changes in v2:
* Use BITS_PER_LONG.
* Use heap allocated memory for test, as newer compilers (correctly)
  warn on OOB stack access.
---
 lib/test_kasan.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 72 insertions(+), 3 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 7de2702621dc..6562df0ca30d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -11,16 +11,17 @@
 
 #define pr_fmt(fmt) "kasan test: %s " fmt, __func__
 
+#include <linux/bitops.h>
 #include <linux/delay.h>
+#include <linux/kasan.h>
 #include <linux/kernel.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
 
 /*
  * Note: test functions are marked noinline so that their names appear in
@@ -623,6 +624,73 @@ static noinline void __init kasan_strings(void)
 	strnlen(ptr, 1);
 }
 
+static noinline void __init kasan_bitops(void)
+{
+	long *bits = kmalloc(sizeof(long), GFP_KERNEL | __GFP_ZERO);
+	if (!bits)
+		return;
+
+	pr_info("within-bounds in set_bit\n");
+	set_bit(0, bits);
+
+	pr_info("within-bounds in set_bit\n");
+	set_bit(BITS_PER_LONG - 1, bits);
+
+	pr_info("out-of-bounds in set_bit\n");
+	set_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __set_bit\n");
+	__set_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in clear_bit\n");
+	clear_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __clear_bit\n");
+	__clear_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in clear_bit_unlock\n");
+	clear_bit_unlock(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __clear_bit_unlock\n");
+	__clear_bit_unlock(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in change_bit\n");
+	change_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __change_bit\n");
+	__change_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in test_and_set_bit\n");
+	test_and_set_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __test_and_set_bit\n");
+	__test_and_set_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in test_and_set_bit_lock\n");
+	test_and_set_bit_lock(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in test_and_clear_bit\n");
+	test_and_clear_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __test_and_clear_bit\n");
+	__test_and_clear_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in test_and_change_bit\n");
+	test_and_change_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in __test_and_change_bit\n");
+	__test_and_change_bit(BITS_PER_LONG, bits);
+
+	pr_info("out-of-bounds in test_bit\n");
+	(void)test_bit(BITS_PER_LONG, bits);
+
+#if defined(clear_bit_unlock_is_negative_byte)
+	pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
+	clear_bit_unlock_is_negative_byte(BITS_PER_LONG, bits);
+#endif
+	kfree(bits);
+}
+
 static int __init kmalloc_tests_init(void)
 {
 	/*
@@ -664,6 +732,7 @@ static int __init kmalloc_tests_init(void)
 	kasan_memchr();
 	kasan_memcmp();
 	kasan_strings();
+	kasan_bitops();
 
 	kasan_restore_multi_shot(multishot);
 
-- 
2.22.0.rc1.257.g3120a18244-goog



* [PATCH 2/3] x86: Move CPU feature test out of uaccess region
  2019-05-29 14:14 [PATCH v2 0/3] Bitops instrumentation for KASAN Marco Elver
  2019-05-29 14:14 ` [PATCH v2 1/3] lib/test_kasan: Add bitops tests Marco Elver
@ 2019-05-29 14:15 ` Marco Elver
  2019-05-29 14:29   ` hpa
  2019-05-29 14:15 ` [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN Marco Elver
  2019-05-31 15:12 ` [PATCH v2 0/3] Bitops " Marco Elver
  3 siblings, 1 reply; 12+ messages in thread
From: Marco Elver @ 2019-05-29 14:15 UTC (permalink / raw)
  To: peterz, aryabinin, dvyukov, glider, andreyknvl, mark.rutland
  Cc: corbet, tglx, mingo, bp, hpa, x86, arnd, jpoimboe, linux-doc,
	linux-kernel, linux-arch, kasan-dev, Marco Elver

This patch is a prerequisite for enabling KASAN bitops instrumentation;
it moves the boot_cpu_has() feature test out of the uaccess region, as
boot_cpu_has() uses test_bit(). With instrumentation, the KASAN check
would otherwise be flagged by objtool.

This approach is preferred over adding the explicit kasan_check_*
functions to objtool's uaccess whitelist, as this appears to be the
only affected case.
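
Concretely, the feature test is hoisted so that no instrumentable call
runs inside the uaccess region; a sketch of the resulting pattern
(put_user_catch() closes the region in the surrounding code, which the
diff below does not show):

	/* Outside the uaccess region: test_bit() may be instrumented. */
	has_xsave = boot_cpu_has(X86_FEATURE_XSAVE);
	put_user_try {
		/* Only put_user_ex() and friends in here. */
		if (has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
	} put_user_catch(err);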

Signed-off-by: Marco Elver <elver@google.com>
---
v1:
* This patch replaces patch: 'tools/objtool: add kasan_check_* to
  uaccess whitelist'
---
 arch/x86/ia32/ia32_signal.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 629d1ee05599..12264e3c9c43 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -333,6 +333,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 	void __user *restorer;
 	int err = 0;
 	void __user *fpstate = NULL;
+	bool has_xsave;
 
 	/* __copy_to_user optimizes that into a single 8 byte store */
 	static const struct {
@@ -352,13 +353,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 	if (!access_ok(frame, sizeof(*frame)))
 		return -EFAULT;
 
+	/*
+	 * Move non-uaccess accesses out of uaccess region if not strictly
+	 * required; this also helps avoid objtool flagging these accesses with
+	 * instrumentation enabled.
+	 */
+	has_xsave = boot_cpu_has(X86_FEATURE_XSAVE);
 	put_user_try {
 		put_user_ex(sig, &frame->sig);
 		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
 		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
 
 		/* Create the ucontext.  */
-		if (boot_cpu_has(X86_FEATURE_XSAVE))
+		if (has_xsave)
 			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
-- 
2.22.0.rc1.257.g3120a18244-goog



* [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN
  2019-05-29 14:14 [PATCH v2 0/3] Bitops instrumentation for KASAN Marco Elver
  2019-05-29 14:14 ` [PATCH v2 1/3] lib/test_kasan: Add bitops tests Marco Elver
  2019-05-29 14:15 ` [PATCH 2/3] x86: Move CPU feature test out of uaccess region Marco Elver
@ 2019-05-29 14:15 ` Marco Elver
  2019-05-29 15:32   ` Mark Rutland
  2019-05-31 15:12 ` [PATCH v2 0/3] Bitops " Marco Elver
  3 siblings, 1 reply; 12+ messages in thread
From: Marco Elver @ 2019-05-29 14:15 UTC (permalink / raw)
  To: peterz, aryabinin, dvyukov, glider, andreyknvl, mark.rutland
  Cc: corbet, tglx, mingo, bp, hpa, x86, arnd, jpoimboe, linux-doc,
	linux-kernel, linux-arch, kasan-dev, Marco Elver

This adds a new header to asm-generic to allow optionally instrumenting
architecture-specific asm implementations of bitops.

This change includes the required change for x86 as reference and
changes the kernel API doc to point to bitops-instrumented.h instead.
Rationale: the functions in x86's bitops.h are no longer the kernel API
functions, but instead the arch_ prefixed functions, which are then
instrumented via bitops-instrumented.h.

Other architectures can similarly add support for asm implementations of
bitops.
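
A minimal sketch of the opt-in pattern described above, as an arch's
bitops.h would implement it (the x86 change below is the reference):

	static __always_inline void
	arch_set_bit(long nr, volatile unsigned long *addr)
	{
		/* arch-specific asm implementation goes here */
	}
	#define arch_set_bit arch_set_bit

	/* ... likewise for the remaining bitops ... */

	#include <asm-generic/bitops-instrumented.h>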

The documentation text has been copied/moved, and *no* changes to it
have been made in this patch.

Tested: using lib/test_kasan with bitops tests (pre-requisite patch).

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=198439
Signed-off-by: Marco Elver <elver@google.com>
---
Changes in v2:
* Instrument word-sized accesses, as specified by the interface.
---
 Documentation/core-api/kernel-api.rst     |   2 +-
 arch/x86/include/asm/bitops.h             | 210 ++++----------
 include/asm-generic/bitops-instrumented.h | 317 ++++++++++++++++++++++
 3 files changed, 370 insertions(+), 159 deletions(-)
 create mode 100644 include/asm-generic/bitops-instrumented.h

diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index a29c99d13331..65266fa1b706 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -51,7 +51,7 @@ The Linux kernel provides more basic utility functions.
 Bit Operations
 --------------
 
-.. kernel-doc:: arch/x86/include/asm/bitops.h
+.. kernel-doc:: include/asm-generic/bitops-instrumented.h
    :internal:
 
 Bitmap Operations
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 8e790ec219a5..8ebf7af9a0f4 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -49,23 +49,8 @@
 #define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)			(1 << ((nr) & 7))
 
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
 static __always_inline void
-set_bit(long nr, volatile unsigned long *addr)
+arch_set_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -77,33 +62,17 @@ set_bit(long nr, volatile unsigned long *addr)
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
+#define arch_set_bit arch_set_bit
 
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
+#define arch___set_bit arch___set_bit
 
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
 static __always_inline void
-clear_bit(long nr, volatile unsigned long *addr)
+arch_clear_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -114,27 +83,25 @@ clear_bit(long nr, volatile unsigned long *addr)
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
+#define arch_clear_bit arch_clear_bit
 
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
-	clear_bit(nr, addr);
+	arch_clear_bit(nr, addr);
 }
+#define arch_clear_bit_unlock arch_clear_bit_unlock
 
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
+#define arch___clear_bit arch___clear_bit
 
-static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
 	bool negative;
 	asm volatile(LOCK_PREFIX "andb %2,%1"
@@ -143,48 +110,25 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
 		: "ir" ((char) ~(1 << nr)) : "memory");
 	return negative;
 }
+#define arch_clear_bit_unlock_is_negative_byte                                 \
+	arch_clear_bit_unlock_is_negative_byte
 
-// Let everybody know we have it
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- */
-static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
+#define arch___clear_bit_unlock arch___clear_bit_unlock
 
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
+#define arch___change_bit arch___change_bit
 
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_change_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -195,43 +139,24 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
+#define arch_change_bit arch_change_bit
 
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
 }
+#define arch_test_and_set_bit arch_test_and_set_bit
 
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
 static __always_inline bool
-test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
-	return test_and_set_bit(nr, addr);
+	return arch_test_and_set_bit(nr, addr);
 }
+#define arch_test_and_set_bit_lock arch_test_and_set_bit_lock
 
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -241,37 +166,17 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 	    : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
+#define arch___test_and_set_bit arch___test_and_set_bit
 
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
 }
+#define arch_test_and_clear_bit arch_test_and_clear_bit
 
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- *
- * Note: the operation is performed atomically with respect to
- * the local CPU, but not other CPUs. Portable code should not
- * rely on this behaviour.
- * KVM relies on this behaviour on x86 for modifying memory that is also
- * accessed from a hypervisor on the same CPU if running in a VM: don't change
- * this without also updating arch/x86/kernel/kvm.c
- */
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -281,9 +186,10 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 		     : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
+#define arch___test_and_clear_bit arch___test_and_clear_bit
 
-/* WARNING: non atomic and it can be reordered! */
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	bool oldbit;
 
@@ -294,19 +200,14 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 
 	return oldbit;
 }
+#define arch___test_and_change_bit arch___test_and_change_bit
 
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
+#define arch_test_and_change_bit arch_test_and_change_bit
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
@@ -326,16 +227,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 	return oldbit;
 }
 
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static bool test_bit(int nr, const volatile unsigned long *addr);
-#endif
-
-#define test_bit(nr, addr)			\
+#define arch_test_bit(nr, addr)			\
 	(__builtin_constant_p((nr))		\
 	 ? constant_test_bit((nr), (addr))	\
 	 : variable_test_bit((nr), (addr)))
@@ -504,6 +396,8 @@ static __always_inline int fls64(__u64 x)
 
 #include <asm-generic/bitops/const_hweight.h>
 
+#include <asm-generic/bitops-instrumented.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
new file mode 100644
index 000000000000..b01b0dd93964
--- /dev/null
+++ b/include/asm-generic/bitops-instrumented.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for bit
+ * operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.), #define each provided arch_ function, and include
+ * this file after their definitions. For undefined arch_ functions, it is
+ * assumed that they are provided via asm-generic/bitops, which are implicitly
+ * instrumented.
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
+
+#include <linux/kasan-checks.h>
+
+#if defined(arch_set_bit)
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_set_bit(nr, addr);
+}
+#endif
+
+#if defined(arch___set_bit)
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___set_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_clear_bit)
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_clear_bit(nr, addr);
+}
+#endif
+
+#if defined(arch___clear_bit)
+/**
+ * __clear_bit - Clears a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___clear_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_clear_bit_unlock)
+/**
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_clear_bit_unlock(nr, addr);
+}
+#endif
+
+#if defined(arch___clear_bit_unlock)
+/**
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit_unlock() is non-atomic and implies release semantics before the
+ * memory operation. It can be used for an unlock if no other CPUs can
+ * concurrently modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___clear_bit_unlock(nr, addr);
+}
+#endif
+
+#if defined(arch_change_bit)
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_change_bit(nr, addr);
+}
+#endif
+
+#if defined(arch___change_bit)
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___change_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_test_and_set_bit)
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_set_bit(nr, addr);
+}
+#endif
+
+#if defined(arch___test_and_set_bit)
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch___test_and_set_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_test_and_set_bit_lock)
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_set_bit_lock(nr, addr);
+}
+#endif
+
+#if defined(arch_test_and_clear_bit)
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_clear_bit(nr, addr);
+}
+#endif
+
+#if defined(arch___test_and_clear_bit)
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
+ */
+static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch___test_and_clear_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_test_and_change_bit)
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_change_bit(nr, addr);
+}
+#endif
+
+#if defined(arch___test_and_change_bit)
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch___test_and_change_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_test_bit)
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline bool test_bit(long nr, const volatile unsigned long *addr)
+{
+	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_bit(nr, addr);
+}
+#endif
+
+#if defined(arch_clear_bit_unlock_is_negative_byte)
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                     byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+static inline bool
+clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+}
+/* Let everybody know we have it. */
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */
-- 
2.22.0.rc1.257.g3120a18244-goog



* Re: [PATCH 2/3] x86: Move CPU feature test out of uaccess region
  2019-05-29 14:15 ` [PATCH 2/3] x86: Move CPU feature test out of uaccess region Marco Elver
@ 2019-05-29 14:29   ` hpa
  2019-05-31  9:57     ` Marco Elver
  0 siblings, 1 reply; 12+ messages in thread
From: hpa @ 2019-05-29 14:29 UTC (permalink / raw)
  To: Marco Elver, peterz, aryabinin, dvyukov, glider, andreyknvl,
	mark.rutland
  Cc: corbet, tglx, mingo, bp, x86, arnd, jpoimboe, linux-doc,
	linux-kernel, linux-arch, kasan-dev

On May 29, 2019 7:15:00 AM PDT, Marco Elver <elver@google.com> wrote:
>This patch is a prerequisite for enabling KASAN bitops instrumentation;
>it moves the boot_cpu_has() feature test out of the uaccess region, as
>boot_cpu_has() uses test_bit(). With instrumentation, the KASAN check
>would otherwise be flagged by objtool.
>
>This approach is preferred over adding the explicit kasan_check_*
>functions to objtool's uaccess whitelist, as this appears to be the
>only affected case.
>
>Signed-off-by: Marco Elver <elver@google.com>
>---
>v1:
>* This patch replaces patch: 'tools/objtool: add kasan_check_* to
>  uaccess whitelist'
>---
> arch/x86/ia32/ia32_signal.c | 9 ++++++++-
> 1 file changed, 8 insertions(+), 1 deletion(-)
>
>diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
>index 629d1ee05599..12264e3c9c43 100644
>--- a/arch/x86/ia32/ia32_signal.c
>+++ b/arch/x86/ia32/ia32_signal.c
>@@ -333,6 +333,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal
>*ksig,
> 	void __user *restorer;
> 	int err = 0;
> 	void __user *fpstate = NULL;
>+	bool has_xsave;
> 
> 	/* __copy_to_user optimizes that into a single 8 byte store */
> 	static const struct {
>@@ -352,13 +353,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal
>*ksig,
> 	if (!access_ok(frame, sizeof(*frame)))
> 		return -EFAULT;
> 
>+	/*
>+	 * Move non-uaccess accesses out of uaccess region if not strictly
>+	 * required; this also helps avoid objtool flagging these accesses
>with
>+	 * instrumentation enabled.
>+	 */
>+	has_xsave = boot_cpu_has(X86_FEATURE_XSAVE);
> 	put_user_try {
> 		put_user_ex(sig, &frame->sig);
> 		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
> 		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
> 
> 		/* Create the ucontext.  */
>-		if (boot_cpu_has(X86_FEATURE_XSAVE))
>+		if (has_xsave)
> 			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
> 		else
> 			put_user_ex(0, &frame->uc.uc_flags);

This was meant to use static_cpu_has(). Why did that get dropped?
-- 
Sent from my Android device with K-9 Mail. Please excuse my brevity.


* Re: [PATCH v2 1/3] lib/test_kasan: Add bitops tests
  2019-05-29 14:14 ` [PATCH v2 1/3] lib/test_kasan: Add bitops tests Marco Elver
@ 2019-05-29 15:15   ` Mark Rutland
  0 siblings, 0 replies; 12+ messages in thread
From: Mark Rutland @ 2019-05-29 15:15 UTC (permalink / raw)
  To: Marco Elver
  Cc: peterz, aryabinin, dvyukov, glider, andreyknvl, corbet, tglx,
	mingo, bp, hpa, x86, arnd, jpoimboe, linux-doc, linux-kernel,
	linux-arch, kasan-dev

On Wed, May 29, 2019 at 04:14:59PM +0200, Marco Elver wrote:
> This adds bitops tests to the test_kasan module. In a follow-up patch,
> support for bitops instrumentation will be added.
> 
> Signed-off-by: Marco Elver <elver@google.com>
> ---
> Changes in v2:
> * Use BITS_PER_LONG.
> * Use heap allocated memory for test, as newer compilers (correctly)
>   warn on OOB stack access.
> ---
>  lib/test_kasan.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 72 insertions(+), 3 deletions(-)
> 
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index 7de2702621dc..6562df0ca30d 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -11,16 +11,17 @@
>  
>  #define pr_fmt(fmt) "kasan test: %s " fmt, __func__
>  
> +#include <linux/bitops.h>
>  #include <linux/delay.h>
> +#include <linux/kasan.h>
>  #include <linux/kernel.h>
> -#include <linux/mman.h>
>  #include <linux/mm.h>
> +#include <linux/mman.h>
> +#include <linux/module.h>
>  #include <linux/printk.h>
>  #include <linux/slab.h>
>  #include <linux/string.h>
>  #include <linux/uaccess.h>
> -#include <linux/module.h>
> -#include <linux/kasan.h>
>  
>  /*
>   * Note: test functions are marked noinline so that their names appear in
> @@ -623,6 +624,73 @@ static noinline void __init kasan_strings(void)
>  	strnlen(ptr, 1);
>  }
>  
> +static noinline void __init kasan_bitops(void)
> +{
> +	long *bits = kmalloc(sizeof(long), GFP_KERNEL | __GFP_ZERO);

Trivial nit, but this can/should be:

	long *bits = kzalloc(sizeof(*bits), GFP_KERNEL);


... which is the usual style for sizeof() to keep the LHS and RHS types
the same, and using kzalloc avoids the need to explicitly pass
__GFP_ZERO.

Otherwise, this looks good to me.

> +	if (!bits)
> +		return;
> +
> +	pr_info("within-bounds in set_bit\n");
> +	set_bit(0, bits);
> +
> +	pr_info("within-bounds in set_bit\n");
> +	set_bit(BITS_PER_LONG - 1, bits);
> +
> +	pr_info("out-of-bounds in set_bit\n");
> +	set_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __set_bit\n");
> +	__set_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in clear_bit\n");
> +	clear_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __clear_bit\n");
> +	__clear_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in clear_bit_unlock\n");
> +	clear_bit_unlock(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __clear_bit_unlock\n");
> +	__clear_bit_unlock(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in change_bit\n");
> +	change_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __change_bit\n");
> +	__change_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in test_and_set_bit\n");
> +	test_and_set_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __test_and_set_bit\n");
> +	__test_and_set_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in test_and_set_bit_lock\n");
> +	test_and_set_bit_lock(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in test_and_clear_bit\n");
> +	test_and_clear_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __test_and_clear_bit\n");
> +	__test_and_clear_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in test_and_change_bit\n");
> +	test_and_change_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in __test_and_change_bit\n");
> +	__test_and_change_bit(BITS_PER_LONG, bits);
> +
> +	pr_info("out-of-bounds in test_bit\n");
> +	(void)test_bit(BITS_PER_LONG, bits);
> +
> +#if defined(clear_bit_unlock_is_negative_byte)
> +	pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
> +	clear_bit_unlock_is_negative_byte(BITS_PER_LONG, bits);
> +#endif
> +	kfree(bits);
> +}
> +
>  static int __init kmalloc_tests_init(void)
>  {
>  	/*
> @@ -664,6 +732,7 @@ static int __init kmalloc_tests_init(void)
>  	kasan_memchr();
>  	kasan_memcmp();
>  	kasan_strings();
> +	kasan_bitops();
>  
>  	kasan_restore_multi_shot(multishot);
>  
> -- 
> 2.22.0.rc1.257.g3120a18244-goog
> 


* Re: [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN
  2019-05-29 14:15 ` [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN Marco Elver
@ 2019-05-29 15:32   ` Mark Rutland
  2019-05-29 15:40     ` Marco Elver
  0 siblings, 1 reply; 12+ messages in thread
From: Mark Rutland @ 2019-05-29 15:32 UTC (permalink / raw)
  To: Marco Elver, peterz
  Cc: aryabinin, dvyukov, glider, andreyknvl, corbet, tglx, mingo, bp,
	hpa, x86, arnd, jpoimboe, linux-doc, linux-kernel, linux-arch,
	kasan-dev

On Wed, May 29, 2019 at 04:15:01PM +0200, Marco Elver wrote:
> This adds a new header to asm-generic to allow optionally instrumenting
> architecture-specific asm implementations of bitops.
> 
> This change includes the required change for x86 as reference and
> changes the kernel API doc to point to bitops-instrumented.h instead.
> Rationale: the functions in x86's bitops.h are no longer the kernel API
> functions, but instead the arch_ prefixed functions, which are then
> instrumented via bitops-instrumented.h.
> 
> Other architectures can similarly add support for asm implementations of
> bitops.
> 
> The documentation text has been copied/moved, and *no* changes to it
> have been made in this patch.
> 
> Tested: using lib/test_kasan with bitops tests (pre-requisite patch).
> 
> Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=198439
> Signed-off-by: Marco Elver <elver@google.com>
> ---
> Changes in v2:
> * Instrument word-sized accesses, as specified by the interface.
> ---
>  Documentation/core-api/kernel-api.rst     |   2 +-
>  arch/x86/include/asm/bitops.h             | 210 ++++----------
>  include/asm-generic/bitops-instrumented.h | 317 ++++++++++++++++++++++
>  3 files changed, 370 insertions(+), 159 deletions(-)
>  create mode 100644 include/asm-generic/bitops-instrumented.h

[...]

> diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
> new file mode 100644
> index 000000000000..b01b0dd93964
> --- /dev/null
> +++ b/include/asm-generic/bitops-instrumented.h
> @@ -0,0 +1,317 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +/*
> + * This file provides wrappers with sanitizer instrumentation for bit
> + * operations.
> + *
> + * To use this functionality, an arch's bitops.h file needs to define each of
> + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
> + * arch___set_bit(), etc.), #define each provided arch_ function, and include
> + * this file after their definitions. For undefined arch_ functions, it is
> + * assumed that they are provided via asm-generic/bitops, which are implicitly
> + * instrumented.
> + */

If using the asm-generic/bitops.h, all of the below will be defined
unconditionally, so I don't believe we need the ifdeffery for each
function.

> +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
> +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
> +
> +#include <linux/kasan-checks.h>
> +
> +#if defined(arch_set_bit)
> +/**
> + * set_bit - Atomically set a bit in memory
> + * @nr: the bit to set
> + * @addr: the address to start counting from
> + *
> + * This function is atomic and may not be reordered.  See __set_bit()
> + * if you do not require the atomic guarantees.
> + *
> + * Note: there are no guarantees that this function will not be reordered
> + * on non x86 architectures, so if you are writing portable code,
> + * make sure not to rely on its reordering guarantees.

These two paragraphs are contradictory.

Since this is not under arch/x86, please fix this to describe the
generic semantics; any x86-specific behaviour should be commented under
arch/x86.

AFAICT per include/asm-generic/bitops/atomic.h, generically this
provides no ordering guarantees. So I think this can be:

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may be reordered.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */

... with the x86 ordering beahviour commented in x86's arch_set_bit.

Peter, do you have a better wording for the above?

[...]

> +#if defined(arch___test_and_clear_bit)
> +/**
> + * __test_and_clear_bit - Clear a bit and return its old value
> + * @nr: Bit to clear
> + * @addr: Address to count from
> + *
> + * This operation is non-atomic and can be reordered.
> + * If two examples of this operation race, one can appear to succeed
> + * but actually fail.  You must protect multiple accesses with a lock.
> + *
> + * Note: the operation is performed atomically with respect to
> + * the local CPU, but not other CPUs. Portable code should not
> + * rely on this behaviour.
> + * KVM relies on this behaviour on x86 for modifying memory that is also
> + * accessed from a hypervisor on the same CPU if running in a VM: don't change
> + * this without also updating arch/x86/kernel/kvm.c
> + */

Likewise, please only specify the generic semantics in this header, and
leave the x86-specific behaviour commented under arch/x86.

Otherwise this looks sound to me.

Thanks,
Mark.


* Re: [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN
  2019-05-29 15:32   ` Mark Rutland
@ 2019-05-29 15:40     ` Marco Elver
  0 siblings, 0 replies; 12+ messages in thread
From: Marco Elver @ 2019-05-29 15:40 UTC (permalink / raw)
  To: Mark Rutland
  Cc: peterz, Andrey Ryabinin, Dmitry Vyukov, Alexander Potapenko,
	Andrey Konovalov, corbet, tglx, mingo, bp, hpa, x86, arnd,
	jpoimboe, linux-doc, LKML, linux-arch, kasan-dev

On Wed, 29 May 2019 at 17:33, Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Wed, May 29, 2019 at 04:15:01PM +0200, Marco Elver wrote:
> > This adds a new header to asm-generic to allow optionally instrumenting
> > architecture-specific asm implementations of bitops.
> >
> > This change includes the required change for x86 as reference and
> > changes the kernel API doc to point to bitops-instrumented.h instead.
> > Rationale: the functions in x86's bitops.h are no longer the kernel API
> > functions, but instead the arch_ prefixed functions, which are then
> > instrumented via bitops-instrumented.h.
> >
> > Other architectures can similarly add support for asm implementations of
> > bitops.
> >
> > The documentation text has been copied/moved, and *no* changes to it
> > have been made in this patch.
> >
> > Tested: using lib/test_kasan with bitops tests (pre-requisite patch).
> >
> > Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=198439
> > Signed-off-by: Marco Elver <elver@google.com>
> > ---
> > Changes in v2:
> > * Instrument word-sized accesses, as specified by the interface.
> > ---
> >  Documentation/core-api/kernel-api.rst     |   2 +-
> >  arch/x86/include/asm/bitops.h             | 210 ++++----------
> >  include/asm-generic/bitops-instrumented.h | 317 ++++++++++++++++++++++
> >  3 files changed, 370 insertions(+), 159 deletions(-)
> >  create mode 100644 include/asm-generic/bitops-instrumented.h
>
> [...]
>
> > diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
> > new file mode 100644
> > index 000000000000..b01b0dd93964
> > --- /dev/null
> > +++ b/include/asm-generic/bitops-instrumented.h
> > @@ -0,0 +1,317 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +
> > +/*
> > + * This file provides wrappers with sanitizer instrumentation for bit
> > + * operations.
> > + *
> > + * To use this functionality, an arch's bitops.h file needs to define each of
> > + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
> > + * arch___set_bit(), etc.), #define each provided arch_ function, and include
> > + * this file after their definitions. For undefined arch_ functions, it is
> > + * assumed that they are provided via asm-generic/bitops, which are implicitly
> > + * instrumented.
> > + */
>
> If using the asm-generic/bitops.h, all of the below will be defined
> unconditionally, so I don't believe we need the ifdeffery for each
> function.
>
> > +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
> > +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
> > +
> > +#include <linux/kasan-checks.h>
> > +
> > +#if defined(arch_set_bit)
> > +/**
> > + * set_bit - Atomically set a bit in memory
> > + * @nr: the bit to set
> > + * @addr: the address to start counting from
> > + *
> > + * This function is atomic and may not be reordered.  See __set_bit()
> > + * if you do not require the atomic guarantees.
> > + *
> > + * Note: there are no guarantees that this function will not be reordered
> > + * on non x86 architectures, so if you are writing portable code,
> > + * make sure not to rely on its reordering guarantees.
>
> These two paragraphs are contradictory.
>
> Since this is not under arch/x86, please fix this to describe the
> generic semantics; any x86-specific behaviour should be commented under
> arch/x86.
>
> AFAICT per include/asm-generic/bitops/atomic.h, generically this
> provides no ordering guarantees. So I think this can be:
>
> /**
>  * set_bit - Atomically set a bit in memory
>  * @nr: the bit to set
>  * @addr: the address to start counting from
>  *
>  * This function is atomic and may be reordered.
>  *
>  * Note that @nr may be almost arbitrarily large; this function is not
>  * restricted to acting on a single-word quantity.
>  */
>
> ... with the x86 ordering beahviour commented in x86's arch_set_bit.
>
> Peter, do you have a better wording for the above?
>
> [...]
>
> > +#if defined(arch___test_and_clear_bit)
> > +/**
> > + * __test_and_clear_bit - Clear a bit and return its old value
> > + * @nr: Bit to clear
> > + * @addr: Address to count from
> > + *
> > + * This operation is non-atomic and can be reordered.
> > + * If two examples of this operation race, one can appear to succeed
> > + * but actually fail.  You must protect multiple accesses with a lock.
> > + *
> > + * Note: the operation is performed atomically with respect to
> > + * the local CPU, but not other CPUs. Portable code should not
> > + * rely on this behaviour.
> > + * KVM relies on this behaviour on x86 for modifying memory that is also
> > + * accessed from a hypervisor on the same CPU if running in a VM: don't change
> > + * this without also updating arch/x86/kernel/kvm.c
> > + */
>
> Likewise, please only specify the generic semantics in this header, and
> leave the x86-specific behaviour commented under arch/x86.

The current official API documentation refers to x86 bitops.h (also
see the Documentation/core-api/kernel-api.rst change):
https://www.kernel.org/doc/htmldocs/kernel-api/API-set-bit.html

I'm happy to change in this patch, but note that this would change the
official API documentation.  Alternatively it could be done in a
separate patch.

Let me know what you prefer.

Thanks,
-- Marco


* Re: [PATCH 2/3] x86: Move CPU feature test out of uaccess region
  2019-05-29 14:29   ` hpa
@ 2019-05-31  9:57     ` Marco Elver
  2019-05-31 23:41       ` hpa
  0 siblings, 1 reply; 12+ messages in thread
From: Marco Elver @ 2019-05-31  9:57 UTC (permalink / raw)
  To: H. Peter Anvin
  Cc: Peter Zijlstra, Andrey Ryabinin, Dmitry Vyukov,
	Alexander Potapenko, Andrey Konovalov, Mark Rutland,
	Jonathan Corbet, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
	the arch/x86 maintainers, Arnd Bergmann, Josh Poimboeuf,
	open list:DOCUMENTATION, LKML, linux-arch, kasan-dev

On Wed, 29 May 2019 at 16:29, <hpa@zytor.com> wrote:
>
> On May 29, 2019 7:15:00 AM PDT, Marco Elver <elver@google.com> wrote:
> >This patch is a prerequisite for enabling KASAN bitops
> >instrumentation; it moves the boot_cpu_has() feature test out of the
> >uaccess region, as boot_cpu_has() uses test_bit(). With
> >instrumentation, the KASAN check would otherwise be flagged by
> >objtool.
> >
> >This approach is preferred over adding the explicit kasan_check_*
> >functions to objtool's uaccess whitelist, as this appears to be the
> >only affected case.
> >
> >Signed-off-by: Marco Elver <elver@google.com>
> >---
> >v1:
> >* This patch replaces patch: 'tools/objtool: add kasan_check_* to
> >  uaccess whitelist'
> >---
> > arch/x86/ia32/ia32_signal.c | 9 ++++++++-
> > 1 file changed, 8 insertions(+), 1 deletion(-)
> >
> >diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
> >index 629d1ee05599..12264e3c9c43 100644
> >--- a/arch/x86/ia32/ia32_signal.c
> >+++ b/arch/x86/ia32/ia32_signal.c
> >@@ -333,6 +333,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal
> >*ksig,
> >       void __user *restorer;
> >       int err = 0;
> >       void __user *fpstate = NULL;
> >+      bool has_xsave;
> >
> >       /* __copy_to_user optimizes that into a single 8 byte store */
> >       static const struct {
> >@@ -352,13 +353,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal
> >*ksig,
> >       if (!access_ok(frame, sizeof(*frame)))
> >               return -EFAULT;
> >
> >+      /*
> >+       * Move non-uaccess accesses out of uaccess region if not strictly
> >+       * required; this also helps avoid objtool flagging these accesses
> >with
> >+       * instrumentation enabled.
> >+       */
> >+      has_xsave = boot_cpu_has(X86_FEATURE_XSAVE);
> >       put_user_try {
> >               put_user_ex(sig, &frame->sig);
> >               put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
> >               put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
> >
> >               /* Create the ucontext.  */
> >-              if (boot_cpu_has(X86_FEATURE_XSAVE))
> >+              if (has_xsave)
> >                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
> >               else
> >                       put_user_ex(0, &frame->uc.uc_flags);
>
> This was meant to use static_cpu_has(). Why did that get dropped?

I couldn't find any mailing list thread explaining why this doesn't
use static_cpu_has(); do you have any background?

static_cpu_has also solves the UACCESS warning.

If you confirm it is safe to change to static_cpu_has(), I will change
this patch. Note that I should then also change
arch/x86/kernel/signal.c to mirror the change for 32bit  (although
KASAN is not supported for 32bit x86).
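
A hypothetical sketch of the alternative, pending your confirmation:

	/* static_cpu_has() avoids test_bit(), so no KASAN check is emitted. */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
	else
		put_user_ex(0, &frame->uc.uc_flags);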

Thanks,
-- Marco


* Re: [PATCH v2 0/3] Bitops instrumentation for KASAN
  2019-05-29 14:14 [PATCH v2 0/3] Bitops instrumentation for KASAN Marco Elver
                   ` (2 preceding siblings ...)
  2019-05-29 14:15 ` [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN Marco Elver
@ 2019-05-31 15:12 ` Marco Elver
  3 siblings, 0 replies; 12+ messages in thread
From: Marco Elver @ 2019-05-31 15:12 UTC (permalink / raw)
  To: Peter Zijlstra, Andrey Ryabinin, Dmitry Vyukov,
	Alexander Potapenko, Andrey Konovalov, Mark Rutland
  Cc: Jonathan Corbet, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
	H. Peter Anvin, the arch/x86 maintainers, Arnd Bergmann,
	Josh Poimboeuf, open list:DOCUMENTATION, LKML, linux-arch,
	kasan-dev

Addressed comments and sent v3:
http://lkml.kernel.org/r/20190531150828.157832-1-elver@google.com

Many thanks!

-- Marco

On Wed, 29 May 2019 at 16:23, Marco Elver <elver@google.com> wrote:
>
> The previous version of this patch series and discussion can be found
> here: https://lkml.org/lkml/2019/5/28/769
>
> The most significant change is that the instrumented access size now
> covers the entire word containing the accessed bit.
>
> Marco Elver (3):
>   lib/test_kasan: Add bitops tests
>   x86: Move CPU feature test out of uaccess region
>   asm-generic, x86: Add bitops instrumentation for KASAN
>
>  Documentation/core-api/kernel-api.rst     |   2 +-
>  arch/x86/ia32/ia32_signal.c               |   9 +-
>  arch/x86/include/asm/bitops.h             | 210 ++++----------
>  include/asm-generic/bitops-instrumented.h | 317 ++++++++++++++++++++++
>  lib/test_kasan.c                          |  75 ++++-
>  5 files changed, 450 insertions(+), 163 deletions(-)
>  create mode 100644 include/asm-generic/bitops-instrumented.h
>
> --
> 2.22.0.rc1.257.g3120a18244-goog
>


* Re: [PATCH 2/3] x86: Move CPU feature test out of uaccess region
  2019-05-31  9:57     ` Marco Elver
@ 2019-05-31 23:41       ` hpa
  2019-06-03  9:03         ` Marco Elver
  0 siblings, 1 reply; 12+ messages in thread
From: hpa @ 2019-05-31 23:41 UTC (permalink / raw)
  To: Marco Elver
  Cc: Peter Zijlstra, Andrey Ryabinin, Dmitry Vyukov,
	Alexander Potapenko, Andrey Konovalov, Mark Rutland,
	Jonathan Corbet, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
	the arch/x86 maintainers, Arnd Bergmann, Josh Poimboeuf,
	open list:DOCUMENTATION, LKML, linux-arch, kasan-dev

On May 31, 2019 2:57:36 AM PDT, Marco Elver <elver@google.com> wrote:
>On Wed, 29 May 2019 at 16:29, <hpa@zytor.com> wrote:
>>
>> On May 29, 2019 7:15:00 AM PDT, Marco Elver <elver@google.com> wrote:
>> >This patch is a prerequisite for enabling KASAN bitops
>> >instrumentation; it moves the boot_cpu_has() feature test out of the
>> >uaccess region, as boot_cpu_has() uses test_bit(). With
>> >instrumentation, the KASAN check would otherwise be flagged by
>> >objtool.
>> >
>> >This approach is preferred over adding the explicit kasan_check_*
>> >functions to objtool's uaccess whitelist, as this appears to be the
>> >only affected case.
>> >
>> >Signed-off-by: Marco Elver <elver@google.com>
>> >---
>> >v1:
>> >* This patch replaces patch: 'tools/objtool: add kasan_check_* to
>> >  uaccess whitelist'
>> >---
>> > arch/x86/ia32/ia32_signal.c | 9 ++++++++-
>> > 1 file changed, 8 insertions(+), 1 deletion(-)
>> >
>> >diff --git a/arch/x86/ia32/ia32_signal.c
>b/arch/x86/ia32/ia32_signal.c
>> >index 629d1ee05599..12264e3c9c43 100644
>> >--- a/arch/x86/ia32/ia32_signal.c
>> >+++ b/arch/x86/ia32/ia32_signal.c
>> >@@ -333,6 +333,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal
>> >*ksig,
>> >       void __user *restorer;
>> >       int err = 0;
>> >       void __user *fpstate = NULL;
>> >+      bool has_xsave;
>> >
>> >       /* __copy_to_user optimizes that into a single 8 byte store
>*/
>> >       static const struct {
>> >@@ -352,13 +353,19 @@ int ia32_setup_rt_frame(int sig, struct
>ksignal
>> >*ksig,
>> >       if (!access_ok(frame, sizeof(*frame)))
>> >               return -EFAULT;
>> >
>> >+      /*
>> >+       * Move non-uaccess accesses out of uaccess region if not
>strictly
>> >+       * required; this also helps avoid objtool flagging these
>accesses
>> >with
>> >+       * instrumentation enabled.
>> >+       */
>> >+      has_xsave = boot_cpu_has(X86_FEATURE_XSAVE);
>> >       put_user_try {
>> >               put_user_ex(sig, &frame->sig);
>> >               put_user_ex(ptr_to_compat(&frame->info),
>&frame->pinfo);
>> >               put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
>> >
>> >               /* Create the ucontext.  */
>> >-              if (boot_cpu_has(X86_FEATURE_XSAVE))
>> >+              if (has_xsave)
>> >                       put_user_ex(UC_FP_XSTATE,
>&frame->uc.uc_flags);
>> >               else
>> >                       put_user_ex(0, &frame->uc.uc_flags);
>>
>> This was meant to use static_cpu_has(). Why did that get dropped?
>
>I couldn't find any mailing list thread referring to why this doesn't
>use static_cpu_has, do you have any background?
>
>static_cpu_has also solves the UACCESS warning.
>
>If you confirm it is safe to change to static_cpu_has(), I will change
>this patch. Note that I should then also change
>arch/x86/kernel/signal.c to mirror the change for 32bit  (although
>KASAN is not supported for 32bit x86).
>
>Thanks,
>-- Marco

I believe at some point the intent was that boot_cpu_has() was safer and could be used everywhere.
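
A rough sketch of the distinction (illustrative only, not a quote of
the kernel sources):

  /* Valid at any point after early CPU detection has filled in
   * boot_cpu_data, even before the alternatives are applied: */
  if (boot_cpu_has(X86_FEATURE_XSAVE))
          pr_info("XSAVE available\n");

  /* Compiles to a patched jump; historically only reliable after
   * apply_alternatives(), which is why a separate
   * static_cpu_has_safe() once existed: */
  if (static_cpu_has(X86_FEATURE_XSAVE))
          pr_info("XSAVE available (fast path)\n");
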
-- 
Sent from my Android device with K-9 Mail. Please excuse my brevity.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/3] x86: Move CPU feature test out of uaccess region
  2019-05-31 23:41       ` hpa
@ 2019-06-03  9:03         ` Marco Elver
  0 siblings, 0 replies; 12+ messages in thread
From: Marco Elver @ 2019-06-03  9:03 UTC (permalink / raw)
  To: H. Peter Anvin
  Cc: Peter Zijlstra, Andrey Ryabinin, Dmitry Vyukov,
	Alexander Potapenko, Andrey Konovalov, Mark Rutland,
	Jonathan Corbet, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
	the arch/x86 maintainers, Arnd Bergmann, Josh Poimboeuf,
	open list:DOCUMENTATION, LKML, linux-arch, kasan-dev

Thanks for the clarification.

I found that the old static_cpu_has() implementation was replaced by
the safe variant (formerly static_cpu_has_safe()):
https://lkml.org/lkml/2016/1/24/29. So is it fair to assume that
static_cpu_has() is now just as safe as boot_cpu_has()?

I have sent a follow-up patch that uses static_cpu_has():
http://lkml.kernel.org/r/20190531150828.157832-3-elver@google.com

Many thanks!
-- Marco
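
For what it's worth, the v3 hunk presumably ends up shaped roughly
like this (a sketch inferred from this thread, not the actual v3
diff):

  put_user_try {
          put_user_ex(sig, &frame->sig);

          /*
           * static_cpu_has() is patched at boot and performs no memory
           * access here, so it can stay inside the uaccess region
           * without KASAN/objtool complaints.
           */
          if (static_cpu_has(X86_FEATURE_XSAVE))
                  put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
          else
                  put_user_ex(0, &frame->uc.uc_flags);
  } put_user_catch(err);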

On Sat, 1 Jun 2019 at 03:13, <hpa@zytor.com> wrote:
>
> On May 31, 2019 2:57:36 AM PDT, Marco Elver <elver@google.com> wrote:
> >On Wed, 29 May 2019 at 16:29, <hpa@zytor.com> wrote:
> >>
> >> On May 29, 2019 7:15:00 AM PDT, Marco Elver <elver@google.com> wrote:
> >> >This patch is a prerequisite for enabling KASAN bitops
> >> >instrumentation: it moves the boot_cpu_has() feature test out of the
> >> >uaccess region, as boot_cpu_has() uses test_bit(). With
> >> >instrumentation, the KASAN check would otherwise be flagged by
> >> >objtool.
> >> >
> >> >This approach is preferred over adding the explicit kasan_check_*
> >> >functions to the uaccess whitelist of objtool, as this appears to be
> >> >the only such case.
> >> >
> >> >Signed-off-by: Marco Elver <elver@google.com>
> >> >---
> >> >v1:
> >> >* This patch replaces patch: 'tools/objtool: add kasan_check_* to
> >> >  uaccess whitelist'
> >> >---
> >> > arch/x86/ia32/ia32_signal.c | 9 ++++++++-
> >> > 1 file changed, 8 insertions(+), 1 deletion(-)
> >> >
> >> >diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
> >> >index 629d1ee05599..12264e3c9c43 100644
> >> >--- a/arch/x86/ia32/ia32_signal.c
> >> >+++ b/arch/x86/ia32/ia32_signal.c
> >> >@@ -333,6 +333,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
> >> >       void __user *restorer;
> >> >       int err = 0;
> >> >       void __user *fpstate = NULL;
> >> >+      bool has_xsave;
> >> >
> >> >       /* __copy_to_user optimizes that into a single 8 byte store */
> >> >       static const struct {
> >> >@@ -352,13 +353,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
> >> >       if (!access_ok(frame, sizeof(*frame)))
> >> >               return -EFAULT;
> >> >
> >> >+      /*
> >> >+       * Move non-uaccess accesses out of uaccess region if not strictly
> >> >+       * required; this also helps avoid objtool flagging these accesses with
> >> >+       * instrumentation enabled.
> >> >+       */
> >> >+      has_xsave = boot_cpu_has(X86_FEATURE_XSAVE);
> >> >       put_user_try {
> >> >               put_user_ex(sig, &frame->sig);
> >> >               put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
> >> >               put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
> >> >
> >> >               /* Create the ucontext.  */
> >> >-              if (boot_cpu_has(X86_FEATURE_XSAVE))
> >> >+              if (has_xsave)
> >> >                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
> >> >               else
> >> >                       put_user_ex(0, &frame->uc.uc_flags);
> >>
> >> This was meant to use static_cpu_has(). Why did that get dropped?
> >
> >I couldn't find any mailing list thread referring to why this doesn't
> >use static_cpu_has(); do you have any background?
> >
> >static_cpu_has() also solves the UACCESS warning.
> >
> >If you confirm it is safe to change to static_cpu_has(), I will change
> >this patch. Note that I should then also change
> >arch/x86/kernel/signal.c to mirror the change for 32-bit (although
> >KASAN is not supported for 32-bit x86).
> >
> >Thanks,
> >-- Marco
>
> I believe at some point the intent was that boot_cpu_has() was safer and could be used everywhere.
> --
> Sent from my Android device with K-9 Mail. Please excuse my brevity.

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2019-06-03  9:03 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-05-29 14:14 [PATCH v2 0/3] Bitops instrumentation for KASAN Marco Elver
2019-05-29 14:14 ` [PATCH v2 1/3] lib/test_kasan: Add bitops tests Marco Elver
2019-05-29 15:15   ` Mark Rutland
2019-05-29 14:15 ` [PATCH 2/3] x86: Move CPU feature test out of uaccess region Marco Elver
2019-05-29 14:29   ` hpa
2019-05-31  9:57     ` Marco Elver
2019-05-31 23:41       ` hpa
2019-06-03  9:03         ` Marco Elver
2019-05-29 14:15 ` [PATCH v2 3/3] asm-generic, x86: Add bitops instrumentation for KASAN Marco Elver
2019-05-29 15:32   ` Mark Rutland
2019-05-29 15:40     ` Marco Elver
2019-05-31 15:12 ` [PATCH v2 0/3] Bitops instrumentation for KASAN Marco Elver

This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).