[arch-commits] Commit in linux/trunk (0001-revert-avmfritz-breaker.patch PKGBUILD)

Tobias Powalowski tpowa at nymeria.archlinux.org
Wed Feb 12 07:36:44 UTC 2014


    Date: Wednesday, February 12, 2014 @ 08:36:43
  Author: tpowa
Revision: 205873

upgpkg: linux 3.13.2-2

revert avmfritz breaker

Added:
  linux/trunk/0001-revert-avmfritz-breaker.patch
Modified:
  linux/trunk/PKGBUILD

------------------------------------+
 0001-revert-avmfritz-breaker.patch |  417 +++++++++++++++++++++++++++++++++++
 PKGBUILD                           |    6 
 2 files changed, 422 insertions(+), 1 deletion(-)

Added: 0001-revert-avmfritz-breaker.patch
===================================================================
--- 0001-revert-avmfritz-breaker.patch	                        (rev 0)
+++ 0001-revert-avmfritz-breaker.patch	2014-02-12 07:36:43 UTC (rev 205873)
@@ -0,0 +1,417 @@
+From e0f6dec35f9286e78879fe1ac92803fd69fc4fdc Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin <hpa at linux.intel.com>
+Date: Wed, 04 Dec 2013 22:31:28 +0000
+Subject: x86, bitops: Correct the assembly constraints to testing bitops
+
+In checkin:
+
+0c44c2d0f459 x86: Use asm goto to implement better modify_and_test() functions
+
+the various functions which do modify and test were unified and
+optimized using "asm goto".  However, this change missed the detail
+that the bitops require an "Ir" constraint rather than an "er"
+constraint ("I" = integer constant from 0-31, "e" = signed 32-bit
+integer constant).  This would cause code to miscompile if these
+functions were used on constant bit positions 32-255 and the build to
+fail if used on constant bit positions above 255.
+
+Add the constraints as a parameter to the GEN_BINARY_RMWcc() macro to
+avoid this problem.
+
+Reported-by: Jesse Brandeburg <jesse.brandeburg at intel.com>
+Signed-off-by: H. Peter Anvin <hpa at linux.intel.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/r/529E8719.4070202@zytor.com
+---
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index da31c8b..b17f4f4 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
+  */
+ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+ }
+ 
+ /**
+@@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
+  */
+ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+ }
+ 
+ /**
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index 3f065c9..46e9052 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+  */
+ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+ }
+ 
+ /**
+@@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+  */
+ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+ }
+ 
+ /**
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 6d76d09..9fc1af7 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
+  */
+ static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+ }
+ 
+ /**
+@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+  */
+ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+ }
+ 
+ /**
+@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+  */
+ static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
++	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+ }
+ 
+ static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 5b23e60..4ad6560 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l)
+  */
+ static inline int local_sub_and_test(long i, local_t *l)
+ {
+-	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
++	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
+ }
+ 
+ /**
+@@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l)
+  */
+ static inline int local_add_negative(long i, local_t *l)
+ {
+-	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
++	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
+ }
+ 
+ /**
+diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
+index 1ff990f..8f7866a 100644
+--- a/arch/x86/include/asm/rmwcc.h
++++ b/arch/x86/include/asm/rmwcc.h
+@@ -16,8 +16,8 @@ cc_label:								\
+ #define GEN_UNARY_RMWcc(op, var, arg0, cc) 				\
+ 	__GEN_RMWcc(op " " arg0, var, cc)
+ 
+-#define GEN_BINARY_RMWcc(op, var, val, arg0, cc)			\
+-	__GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val))
++#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
++	__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+ 
+ #else /* !CC_HAVE_ASM_GOTO */
+ 
+@@ -33,8 +33,8 @@ do {									\
+ #define GEN_UNARY_RMWcc(op, var, arg0, cc)				\
+ 	__GEN_RMWcc(op " " arg0, var, cc)
+ 
+-#define GEN_BINARY_RMWcc(op, var, val, arg0, cc)			\
+-	__GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val))
++#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
++	__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+ 
+ #endif /* CC_HAVE_ASM_GOTO */
+ 
+--
+cgit v0.9.2
+
+From 0c44c2d0f459cd7e275242b72f500137c4fa834d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Wed, 11 Sep 2013 13:19:24 +0000
+Subject: x86: Use asm goto to implement better modify_and_test() functions
+
+Linus suggested using asm goto to get rid of the typical SETcc + TEST
+instruction pair -- which also clobbers an extra register -- for our
+typical modify_and_test() functions.
+
+Because asm goto doesn't allow output fields it has to include an
+unconditional memory clobber when it changes a memory variable to force
+a reload.
+
+Luckily all atomic ops already imply a compiler barrier to go along
+with their memory barrier semantics.
+
+Suggested-by: Linus Torvalds <torvalds at linux-foundation.org>
+Signed-off-by: Peter Zijlstra <peterz at infradead.org>
+Link: http://lkml.kernel.org/n/tip-0mtn9siwbeo1d33bap1422se@git.kernel.org
+Signed-off-by: Ingo Molnar <mingo at kernel.org>
+---
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index 722aa3b..da31c8b 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -6,6 +6,7 @@
+ #include <asm/processor.h>
+ #include <asm/alternative.h>
+ #include <asm/cmpxchg.h>
++#include <asm/rmwcc.h>
+ 
+ /*
+  * Atomic operations that C can't guarantee us.  Useful for
+@@ -76,12 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
+  */
+ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+-		     : "+m" (v->counter), "=qm" (c)
+-		     : "ir" (i) : "memory");
+-	return c;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
+ }
+ 
+ /**
+@@ -118,12 +114,7 @@ static inline void atomic_dec(atomic_t *v)
+  */
+ static inline int atomic_dec_and_test(atomic_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+-		     : "+m" (v->counter), "=qm" (c)
+-		     : : "memory");
+-	return c != 0;
++	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+ }
+ 
+ /**
+@@ -136,12 +127,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
+  */
+ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "incl %0; sete %1"
+-		     : "+m" (v->counter), "=qm" (c)
+-		     : : "memory");
+-	return c != 0;
++	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+ }
+ 
+ /**
+@@ -155,12 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
+  */
+ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+-		     : "+m" (v->counter), "=qm" (c)
+-		     : "ir" (i) : "memory");
+-	return c;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
+ }
+ 
+ /**
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index 0e1cbfc..3f065c9 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -72,12 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+  */
+ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+-		     : "=m" (v->counter), "=qm" (c)
+-		     : "er" (i), "m" (v->counter) : "memory");
+-	return c;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
+ }
+ 
+ /**
+@@ -116,12 +111,7 @@ static inline void atomic64_dec(atomic64_t *v)
+  */
+ static inline int atomic64_dec_and_test(atomic64_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "decq %0; sete %1"
+-		     : "=m" (v->counter), "=qm" (c)
+-		     : "m" (v->counter) : "memory");
+-	return c != 0;
++	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+ }
+ 
+ /**
+@@ -134,12 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+  */
+ static inline int atomic64_inc_and_test(atomic64_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "incq %0; sete %1"
+-		     : "=m" (v->counter), "=qm" (c)
+-		     : "m" (v->counter) : "memory");
+-	return c != 0;
++	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+ }
+ 
+ /**
+@@ -153,12 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+  */
+ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+-	unsigned char c;
+-
+-	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+-		     : "=m" (v->counter), "=qm" (c)
+-		     : "er" (i), "m" (v->counter) : "memory");
+-	return c;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
+ }
+ 
+ /**
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 41639ce..6d76d09 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -14,6 +14,7 @@
+ 
+ #include <linux/compiler.h>
+ #include <asm/alternative.h>
++#include <asm/rmwcc.h>
+ 
+ #if BITS_PER_LONG == 32
+ # define _BITOPS_LONG_SHIFT 5
+@@ -204,12 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
+  */
+ static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+ {
+-	int oldbit;
+-
+-	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
+-		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+-
+-	return oldbit;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
+ }
+ 
+ /**
+@@ -255,13 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+  */
+ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+ {
+-	int oldbit;
+-
+-	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
+-		     "sbb %0,%0"
+-		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+-
+-	return oldbit;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
+ }
+ 
+ /**
+@@ -314,13 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+  */
+ static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+ {
+-	int oldbit;
+-
+-	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
+-		     "sbb %0,%0"
+-		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+-
+-	return oldbit;
++	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
+ }
+ 
+ static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 2d89e39..5b23e60 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -52,12 +52,7 @@ static inline void local_sub(long i, local_t *l)
+  */
+ static inline int local_sub_and_test(long i, local_t *l)
+ {
+-	unsigned char c;
+-
+-	asm volatile(_ASM_SUB "%2,%0; sete %1"
+-		     : "+m" (l->a.counter), "=qm" (c)
+-		     : "ir" (i) : "memory");
+-	return c;
++	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
+ }
+ 
+ /**
+@@ -70,12 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l)
+  */
+ static inline int local_dec_and_test(local_t *l)
+ {
+-	unsigned char c;
+-
+-	asm volatile(_ASM_DEC "%0; sete %1"
+-		     : "+m" (l->a.counter), "=qm" (c)
+-		     : : "memory");
+-	return c != 0;
++	GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
+ }
+ 
+ /**
+@@ -88,12 +78,7 @@ static inline int local_dec_and_test(local_t *l)
+  */
+ static inline int local_inc_and_test(local_t *l)
+ {
+-	unsigned char c;
+-
+-	asm volatile(_ASM_INC "%0; sete %1"
+-		     : "+m" (l->a.counter), "=qm" (c)
+-		     : : "memory");
+-	return c != 0;
++	GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
+ }
+ 
+ /**
+@@ -107,12 +92,7 @@ static inline int local_inc_and_test(local_t *l)
+  */
+ static inline int local_add_negative(long i, local_t *l)
+ {
+-	unsigned char c;
+-
+-	asm volatile(_ASM_ADD "%2,%0; sets %1"
+-		     : "+m" (l->a.counter), "=qm" (c)
+-		     : "ir" (i) : "memory");
+-	return c;
++	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
+ }
+ 
+ /**
+cgit v0.9.2
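
A note for readers skimming the two quoted upstream commits: both are
easier to follow with a compilable sketch next to them. First, the
asm-goto pattern from the second commit above. This is a hand-written
user-space approximation, not the kernel's __GEN_RMWcc macro: plain
`asm goto` instead of the asm_volatile_goto wrapper, no LOCK prefix,
and an invented function name. It shows the idea -- the condition code
jumps straight to a C label, so there is no SETcc + TEST pair and no
output register -- and why the "memory" clobber is needed: asm goto
could not have outputs in GCC of this era, so the clobber is what
forces the compiler to reload *counter afterwards.

/* Sketch only: single-threaded approximation of atomic_dec_and_test()
 * built on the asm-goto pattern from the quoted commit. */
static inline int sketch_dec_and_test(int *counter)
{
	asm goto("decl %0\n\t"
		 "je %l[became_zero]"
		 : /* no outputs: asm goto forbids them here */
		 : "m" (*counter)	/* input only... */
		 : "memory", "cc"	/* ...so "memory" forces the reload */
		 : became_zero);
	return 0;			/* fell through: counter != 0 */
became_zero:
	return 1;			/* jumped here: counter hit zero */
}

Without asm goto the compiler has to materialize the flag (sete into a
spare register) and test it again in the caller; with it, the caller's
branch is the jcc itself.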

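Second, the constraint subtlety the first quoted commit fixes. Again a
user-space sketch with invented names and no LOCK prefix: "I" permits
an immediate only in the range 0..31, so "Ir" forces larger bit
numbers into a register, and btsq with a register operand can address
any bit offset relative to the base pointer. Had the macro kept "er",
a constant such as 200 would have been emitted as an 8-bit immediate
and truncated modulo the operand size -- the silent miscompile for
constant bit positions 32-255 described above (above 255 the immediate
does not fit at all and assembly fails).

#include <stdio.h>

/* Sketch only: non-atomic cousin of test_and_set_bit(). */
static inline int sketch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;
	asm("bts %2,%1\n\t"
	    "sbb %0,%0"			/* CF -> 0 or -1 */
	    : "=r" (oldbit), "+m" (*addr)
	    : "Ir" (nr)			/* immediate only if 0..31, else register */
	    : "memory");
	return oldbit;
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };	/* 256 bits */
	sketch_test_and_set_bit(200, bitmap);	/* 200 = 3*64 + 8 */
	printf("%#lx\n", bitmap[3]);		/* prints 0x100 on x86_64 */
	return 0;
}

Built with gcc on x86_64, bit 200 correctly lands in word 3 because the
bit number travels in a register; the kernel's real versions add
LOCK_PREFIX and the RMWcc plumbing shown in the diff.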
Modified: PKGBUILD
===================================================================
--- PKGBUILD	2014-02-12 07:31:26 UTC (rev 205872)
+++ PKGBUILD	2014-02-12 07:36:43 UTC (rev 205873)
@@ -6,7 +6,7 @@
 #pkgbase=linux-custom       # Build kernel with a different name
 _srcname=linux-3.13
 pkgver=3.13.2
-pkgrel=1
+pkgrel=2
 arch=('i686' 'x86_64')
 url="http://www.kernel.org/"
 license=('GPL2')
@@ -27,6 +27,7 @@
         '0005-sunrpc-add-an-info-file-for-the-dummy-gssd-pipe.patch'
         '0006-rpc_pipe-fix-cleanup-of-dummy-gssd-directory-when-no.patch'
         '0001-syscalls.h-use-gcc-alias-instead-of-assembler-aliase.patch'
+        '0001-revert-avmfritz-breaker.patch'
         'i8042-fix-aliases.patch'
         )
 md5sums=('0ecbaf65c00374eb4a826c2f9f37606f'
@@ -43,6 +44,7 @@
          'd5907a721b97299f0685c583499f7820'
          'a724515b350b29c53f20e631c6cf9a14'
          'e6fa278c092ad83780e2dd0568e24ca6'
+         'bc1917dd2a0f9e4f511f120c85fa0c49'
          '93dbf73af819b77f03453a9c6de2bb47')
 
 _kernelname=${pkgbase#linux}
@@ -85,6 +87,8 @@
 
   # Fix i8042 aliases
   patch -p1 -i "${srcdir}/i8042-fix-aliases.patch"
+  # Revert avmfritz breaker
+  patch -Rp1 -i "${srcdir}/0001-revert-avmfritz-breaker.patch"
 
   if [ "${CARCH}" = "x86_64" ]; then
     cat "${srcdir}/config.x86_64" > ./.config



