[arch-commits] CVS update of extra/network/fcpcmcia (PKGBUILD string.h string64.h)

Tobias Powalowski tpowa@archlinux.org
Wed Nov 7 17:53:24 UTC 2007


    Date: Wednesday, November 7, 2007 @ 12:53:24
  Author: tpowa
    Path: /home/cvs-extra/extra/network/fcpcmcia

   Added: string.h (1.1) string64.h (1.1)
Modified: PKGBUILD (1.27 -> 1.28)

'upgpkg: fixed capi20 interface'


------------+
 PKGBUILD   |   26 ++-
 string.h   |  493 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 string64.h |   67 ++++++++
 3 files changed, 582 insertions(+), 4 deletions(-)


Index: extra/network/fcpcmcia/PKGBUILD
diff -u extra/network/fcpcmcia/PKGBUILD:1.27 extra/network/fcpcmcia/PKGBUILD:1.28
--- extra/network/fcpcmcia/PKGBUILD:1.27	Thu Oct 11 04:30:35 2007
+++ extra/network/fcpcmcia/PKGBUILD	Wed Nov  7 12:53:24 2007
@@ -1,11 +1,11 @@
-# $Id: PKGBUILD,v 1.27 2007/10/11 08:30:35 tpowa Exp $
+# $Id: PKGBUILD,v 1.28 2007/11/07 17:53:24 tpowa Exp $
 #Maintainer: Tobias Powalowski <t.powa at gmx.de>
 
 
 
 pkgname=fcpcmcia
 pkgver=31107
-pkgrel=31
+pkgrel=32
 _kernver=2.6.23-ARCH
 pkgdesc="AVM ISDN driver for FRITZ PCMCIA isdn cards. For stock arch 2.6 kernel"
 arch=(i686 x86_64)
@@ -13,9 +13,23 @@
 url="http://www.avm.de"
 depends=('capi4k-utils' 'kernel26')
 [ "$CARCH" = "i686" ] && source=(ftp://ftp.avm.de/cardware/fritzcrd.pcm/linux/suse.93/fcpcmcia-suse93-3.11-07.tar.gz \
-	pcmcia-2.6.17.patch atomic.patch kernel-2.6.19.patch kernel-2.6.20.patch kernel-2.6.22.patch)
+	pcmcia-2.6.17.patch atomic.patch kernel-2.6.19.patch kernel-2.6.20.patch kernel-2.6.22.patch string.h)
+md5sums=('96fc3e72afdc2087b4dafe41c069ec18'
+         'ec21dee3a5a09203dc55b399c29bc58c'
+         '258b13bb2ac46960b9c91635f02cd080'
+         '5c63dc9df0810bb1ba85f3425abaa0ed'
+         '890ec6e1219a3a4bae77f5815c3ed40d'
+         '4f4f03630ed994b16b4cd167f090a116'
+         '4be09efb2e81482fdc1d043ad164b2e3')
 [ "$CARCH" = "x86_64" ] && source=(ftp://ftp.avm.de/cardware/fritzcrd.pcm/linux_64bit/suse.10.0/fcpcmcia-suse10.0-64bit-3.11-07.tar.gz \
-	pcmcia-2.6.17.patch atomic.patch kernel-2.6.19.patch kernel-2.6.20.patch kernel-2.6.22.patch)
+	pcmcia-2.6.17.patch atomic.patch kernel-2.6.19.patch kernel-2.6.20.patch kernel-2.6.22.patch string64.h)
+md5sums=('4f461710cf57ebf50b3c4f2842288752'
+         'ec21dee3a5a09203dc55b399c29bc58c'
+         '258b13bb2ac46960b9c91635f02cd080'
+         '5c63dc9df0810bb1ba85f3425abaa0ed'
+         '890ec6e1219a3a4bae77f5815c3ed40d'
+         '4f4f03630ed994b16b4cd167f090a116'
+         'ff330f2d76e82b6e257d9cd76fd4808d')
 install=(fcpcmcia.install)
 options=(!makeflags)
 build() {
@@ -25,6 +39,10 @@
   patch -Np0 -i ../../kernel-2.6.19.patch || return 1
   patch -Np0 -i ../../kernel-2.6.20.patch || return 1
   patch -Np0 -i ../../kernel-2.6.22.patch || return 1
+  [ "$CARCH" = "i686" ] && cp $startdir/src/string.h .
+  [ "$CARCH" = "x86_64" ] && cp $startdir/src/string64.h string.h
+  sed -i 's#<linux/string.h>#"string.h"#' driver.c main.c tools.c fcpcmcia_cs.c
+  sed -i 's#tools.h#tools.h string.h#' Makefile
   cd $startdir/src/fritz
   make KDIR=/lib/modules/${_kernver}/build LIBDIR=$startdir/pkg/var/lib/fritz all || return 1
   mkdir -p $startdir/pkg/lib/modules/${_kernver}/kernel/drivers/isdn/hardware/avm/
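
For reference, the two sed lines added to build() just swap the kernel header
for the bundled copy in the driver sources and record it as a prerequisite;
the effect on driver.c, main.c, tools.c and fcpcmcia_cs.c looks like this
(only the include line is taken from the diff, the surrounding context is
assumed):

  /* before the build() additions the AVM sources use the kernel header */
  #include <linux/string.h>

  /* after sed -i 's#<linux/string.h>#"string.h"#' ... they pick up the
   * copy that the preceding cp step placed next to them */
  #include "string.h"

On x86_64 the 64-bit variant is copied in under the same string.h name, so the
rewrite is identical for both architectures; the second sed presumably appends
string.h next to tools.h in a header list in the Makefile so the modules are
rebuilt when the bundled header changes.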
Index: extra/network/fcpcmcia/string.h
diff -u /dev/null extra/network/fcpcmcia/string.h:1.1
--- /dev/null	Wed Nov  7 12:53:24 2007
+++ extra/network/fcpcmcia/string.h	Wed Nov  7 12:53:24 2007
@@ -0,0 +1,493 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+#ifdef __KERNEL__
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ */
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strsep,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ *		NO Copyright (C) 1991, 1992 Linus Torvalds,
+ *		consider these trivial functions to be PD.
+ */
+
+/* AK: in fact I bet it would be better to move this stuff all out of line.
+ */
+
+#define __HAVE_ARCH_STRCPY
+static inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+	"1:\tlodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2)
+	:"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"1:\tdecl %2\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"rep\n\t"
+	"stosb\n"
+	"2:"
+	: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+	:"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+static inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"decl %1\n"
+	"1:\tlodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+	: "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+static inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"decl %1\n\t"
+	"movl %8,%3\n"
+	"1:\tdecl %3\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n"
+	"2:\txorl %2,%2\n\t"
+	"stosb"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+	: "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
+	: "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+	"1:\tlodsb\n\t"
+	"scasb\n\t"
+	"jne 2f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"xorl %%eax,%%eax\n\t"
+	"jmp 3f\n"
+	"2:\tsbbl %%eax,%%eax\n\t"
+	"orb $1,%%al\n"
+	"3:"
+	:"=a" (__res), "=&S" (d0), "=&D" (d1)
+	:"1" (cs),"2" (ct)
+	:"memory");
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+	"1:\tdecl %3\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"scasb\n\t"
+	"jne 3f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n"
+	"2:\txorl %%eax,%%eax\n\t"
+	"jmp 4f\n"
+	"3:\tsbbl %%eax,%%eax\n\t"
+	"orb $1,%%al\n"
+	"4:"
+	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+	:"1" (cs),"2" (ct),"3" (count)
+	:"memory");
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+static inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+	"movb %%al,%%ah\n"
+	"1:\tlodsb\n\t"
+	"cmpb %%ah,%%al\n\t"
+	"je 2f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"movl $1,%1\n"
+	"2:\tmovl %1,%0\n\t"
+	"decl %0"
+	:"=a" (__res), "=&S" (d0)
+	:"1" (s),"0" (c)
+	:"memory");
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+static inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+	"movb %%al,%%ah\n"
+	"1:\tlodsb\n\t"
+	"cmpb %%ah,%%al\n\t"
+	"jne 2f\n\t"
+	"leal -1(%%esi),%0\n"
+	"2:\ttestb %%al,%%al\n\t"
+	"jne 1b"
+	:"=g" (__res), "=&S" (d0), "=&a" (d1)
+	:"0" (0),"1" (s),"2" (c)
+	:"memory");
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+static inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"notl %0\n\t"
+	"decl %0"
+	:"=c" (__res), "=&D" (d0)
+	:"1" (s),"a" (0), "0" (0xffffffffu)
+	:"memory");
+return __res;
+}
+
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+	"rep ; movsl\n\t"
+	"movl %4,%%ecx\n\t"
+	"andl $3,%%ecx\n\t"
+#if 1	/* want to pay 2 byte penalty for a chance to skip microcoded rep? */
+	"jz 1f\n\t"
+#endif
+	"rep ; movsb\n\t"
+	"1:"
+	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+	: "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from)
+	: "memory");
+return (to);
+}
+
+/*
+ * This looks ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+	long esi, edi;
+	if (!n) return to;
+#if 1	/* want to do small copies with non-string ops? */
+	switch (n) {
+		case 1: *(char*)to = *(char*)from; return to;
+		case 2: *(short*)to = *(short*)from; return to;
+		case 4: *(int*)to = *(int*)from; return to;
+#if 1	/* including those doable with two moves? */
+		case 3: *(short*)to = *(short*)from;
+			*((char*)to+2) = *((char*)from+2); return to;
+		case 5: *(int*)to = *(int*)from;
+			*((char*)to+4) = *((char*)from+4); return to;
+		case 6: *(int*)to = *(int*)from;
+			*((short*)to+2) = *((short*)from+2); return to;
+		case 8: *(int*)to = *(int*)from;
+			*((int*)to+1) = *((int*)from+1); return to;
+#endif
+	}
+#endif
+	esi = (long) from;
+	edi = (long) to;
+	if (n >= 5*4) {
+		/* large block: use rep prefix */
+		int ecx;
+		__asm__ __volatile__(
+			"rep ; movsl"
+			: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
+			: "0" (n/4), "1" (edi),"2" (esi)
+			: "memory"
+		);
+	} else {
+		/* small block: don't clobber ecx + smaller code */
+		if (n >= 4*4) __asm__ __volatile__("movsl"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+		if (n >= 3*4) __asm__ __volatile__("movsl"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+		if (n >= 2*4) __asm__ __volatile__("movsl"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+		if (n >= 1*4) __asm__ __volatile__("movsl"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+	}
+	switch (n % 4) {
+		/* tail */
+		case 0: return to;
+		case 1: __asm__ __volatile__("movsb"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+			return to;
+		case 2: __asm__ __volatile__("movsw"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+			return to;
+		default: __asm__ __volatile__("movsw\n\tmovsb"
+			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+			return to;
+	}
+}
+
+#define __HAVE_ARCH_MEMCPY
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+/*
+ *	This CPU favours 3DNow strongly (eg AMD Athlon)
+ */
+
+static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
+{
+	if (len < 512)
+		return __constant_memcpy(to, from, len);
+	return _mmx_memcpy(to, from, len);
+}
+
+static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
+{
+	if (len < 512)
+		return __memcpy(to, from, len);
+	return _mmx_memcpy(to, from, len);
+}
+
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy3d((t),(f),(n)) : \
+ __memcpy3d((t),(f),(n)))
+
+#else
+
+/*
+ *	No 3D Now!
+ */
+ 
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#endif
+
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void * dest,const void * src, size_t n);
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+static inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+	return NULL;
+__asm__ __volatile__(
+	"repne\n\t"
+	"scasb\n\t"
+	"je 1f\n\t"
+	"movl $1,%0\n"
+	"1:\tdecl %0"
+	:"=D" (__res), "=&c" (d0)
+	:"a" (c),"0" (cs),"1" (count)
+	:"memory");
+return __res;
+}
+
+static inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+	"rep\n\t"
+	"stosb"
+	: "=&c" (d0), "=&D" (d1)
+	:"a" (c),"1" (s),"0" (count)
+	:"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+	"rep ; stosl\n\t"
+	"testb $2,%b3\n\t"
+	"je 1f\n\t"
+	"stosw\n"
+	"1:\ttestb $1,%b3\n\t"
+	"je 2f\n\t"
+	"stosb\n"
+	"2:"
+	:"=&c" (d0), "=&D" (d1)
+	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+	:"memory");
+return (s);	
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+static inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+	"movl %2,%0\n\t"
+	"jmp 2f\n"
+	"1:\tcmpb $0,(%0)\n\t"
+	"je 3f\n\t"
+	"incl %0\n"
+	"2:\tdecl %1\n\t"
+	"cmpl $-1,%1\n\t"
+	"jne 1b\n"
+	"3:\tsubl %2,%0"
+	:"=a" (__res), "=&d" (d0)
+	:"c" (s),"1" (count)
+	:"memory");
+return __res;
+}
+/* end of additional stuff */
+
+#define __HAVE_ARCH_STRSTR
+
+extern char *strstr(const char *cs, const char *ct);
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count is constant..
+ */
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+	switch (count) {
+		case 0:
+			return s;
+		case 1:
+			*(unsigned char *)s = pattern;
+			return s;
+		case 2:
+			*(unsigned short *)s = pattern;
+			return s;
+		case 3:
+			*(unsigned short *)s = pattern;
+			*(2+(unsigned char *)s) = pattern;
+			return s;
+		case 4:
+			*(unsigned long *)s = pattern;
+			return s;
+	}
+#define COMMON(x) \
+__asm__  __volatile__( \
+	"rep ; stosl" \
+	x \
+	: "=&c" (d0), "=&D" (d1) \
+	: "a" (pattern),"0" (count/4),"1" ((long) s) \
+	: "memory")
+{
+	int d0, d1;
+	switch (count % 4) {
+		case 0: COMMON(""); return s;
+		case 1: COMMON("\n\tstosb"); return s;
+		case 2: COMMON("\n\tstosw"); return s;
+		default: COMMON("\n\tstosw\n\tstosb"); return s;
+	}
+}
+  
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+static inline void * memscan(void * addr, int c, size_t size)
+{
+	if (!size)
+		return addr;
+	__asm__("repnz; scasb\n\t"
+		"jnz 1f\n\t"
+		"dec %%edi\n"
+		"1:"
+		: "=D" (addr), "=c" (size)
+		: "0" (addr), "1" (size), "a" (c)
+		: "memory");
+	return addr;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
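
The string.h added above is the i386 asm string header from the 2.6 kernel
tree, kept verbatim so the driver builds against its inline rep/movs and
rep/stos implementations regardless of what the current <linux/string.h>
provides. One detail worth noting is the memset macro near the end: a
compile-time-constant fill byte is multiplied by 0x01010101UL so it is
replicated into a full 32-bit word and rep;stosl can store four bytes at a
time. A minimal user-space sketch of that replication step, with hypothetical
helper names (replicate_byte, fill_words) that do not appear in the header:

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* replicate a fill byte into a 32-bit pattern the same way the header's
   * memset macro does: 0x01010101UL * (unsigned char)(c) */
  static uint32_t replicate_byte(unsigned char c)
  {
          return 0x01010101UL * c;
  }

  /* stand-in for __constant_c_memset: store whole words first, then let
   * plain byte stores handle the 0-3 byte tail */
  static void fill_words(void *s, unsigned char c, size_t count)
  {
          uint32_t pattern = replicate_byte(c);
          unsigned char *p = s;
          size_t i;

          for (i = 0; i + 4 <= count; i += 4)
                  memcpy(p + i, &pattern, 4);   /* word-at-a-time body */
          for (; i < count; i++)
                  p[i] = c;                     /* byte tail           */
  }

  int main(void)
  {
          unsigned char buf[10];

          printf("0x%08x\n", (unsigned)replicate_byte(0xab)); /* 0xabababab */
          fill_words(buf, 0x5a, sizeof(buf));
          printf("%02x %02x\n", buf[0], buf[9]);               /* 5a 5a */
          return 0;
  }

This builds with plain gcc; nothing kernel-specific is needed to see the
pattern, the inline assembly in the header only changes how the stores are
issued.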
Index: extra/network/fcpcmcia/string64.h
diff -u /dev/null extra/network/fcpcmcia/string64.h:1.1
--- /dev/null	Wed Nov  7 12:53:24 2007
+++ extra/network/fcpcmcia/string64.h	Wed Nov  7 12:53:24 2007
@@ -0,0 +1,67 @@
+#ifndef _X86_64_STRING_H_
+#define _X86_64_STRING_H_
+
+#ifdef __KERNEL__
+
+/* Written 2002 by Andi Kleen */ 
+
+/* Only used for special circumstances. Stolen from i386/string.h */ 
+static inline void * __inline_memcpy(void * to, const void * from, size_t n)
+{
+unsigned long d0, d1, d2;
+__asm__ __volatile__(
+	"rep ; movsl\n\t"
+	"testb $2,%b4\n\t"
+	"je 1f\n\t"
+	"movsw\n"
+	"1:\ttestb $1,%b4\n\t"
+	"je 2f\n\t"
+	"movsb\n"
+	"2:"
+	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+	: "memory");
+return (to);
+}
+
+/* Even with __builtin_ the compiler may decide to use the out of line
+   function. */
+
+#define __HAVE_ARCH_MEMCPY 1
+extern void *__memcpy(void *to, const void *from, size_t len); 
+#define memcpy(dst,src,len) \
+	({ size_t __len = (len);				\
+	   void *__ret;						\
+	   if (__builtin_constant_p(len) && __len >= 64)	\
+		 __ret = __memcpy((dst),(src),__len);		\
+	   else							\
+		 __ret = __builtin_memcpy((dst),(src),__len);	\
+	   __ret; }) 
+
+
+#define __HAVE_ARCH_MEMSET
+#define memset __builtin_memset
+
+#define __HAVE_ARCH_MEMMOVE
+void * memmove(void * dest,const void *src,size_t count);
+
+/* Use C out of line version for memcmp */ 
+#define memcmp __builtin_memcmp
+int memcmp(const void * cs,const void * ct,size_t count);
+
+/* out of line string functions use always C versions */ 
+#define strlen __builtin_strlen
+size_t strlen(const char * s);
+
+#define strcpy __builtin_strcpy
+char * strcpy(char * dest,const char *src);
+
+#define strcat __builtin_strcat
+char * strcat(char * dest, const char * src);
+
+#define strcmp __builtin_strcmp
+int strcmp(const char * cs,const char * ct);
+
+#endif /* __KERNEL__ */
+
+#endif
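
string64.h is the matching x86_64 header: everything except memcpy defers to
the gcc builtins, and memcpy only falls back to the out-of-line __memcpy for
compile-time-constant lengths of 64 bytes or more, wrapped in a GNU statement
expression so the macro still yields a value. A hedged user-space sketch of
that threshold dispatch, with big_copy as an invented stand-in for the
out-of-line routine:

  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  /* stand-in for the out-of-line __memcpy used for large constant copies */
  static void *big_copy(void *dst, const void *src, size_t len)
  {
          return memcpy(dst, src, len);
  }

  /* same shape as the string64.h memcpy macro: the statement expression
   * ({ ... }) evaluates len once, routes constant sizes >= 64 to the
   * out-of-line helper and everything else to __builtin_memcpy */
  #define my_memcpy(dst, src, len)                                \
          ({ size_t __len = (len);                                \
             void *__ret;                                         \
             if (__builtin_constant_p(len) && __len >= 64)        \
                  __ret = big_copy((dst), (src), __len);          \
             else                                                 \
                  __ret = __builtin_memcpy((dst), (src), __len);  \
             __ret; })

  int main(void)
  {
          char src[128] = "x86_64 path";
          char dst[128];

          my_memcpy(dst, src, sizeof(src)); /* 128: constant and >= 64  */
          my_memcpy(dst, src, 12);          /* small constant: builtin  */
          printf("%s\n", dst);
          return 0;
  }

As the comment in the header notes, even with __builtin_memcpy the compiler
may still decide to call the out-of-line function, so the threshold is a hint
rather than a guarantee.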