aboutsummaryrefslogtreecommitdiff
path: root/linux/dev/include/asm-i386
diff options
context:
space:
mode:
Diffstat (limited to 'linux/dev/include/asm-i386')
-rw-r--r--linux/dev/include/asm-i386/page.h59
-rw-r--r--linux/dev/include/asm-i386/smp.h8
-rw-r--r--linux/dev/include/asm-i386/string.h487
-rw-r--r--linux/dev/include/asm-i386/system.h356
-rw-r--r--linux/dev/include/asm-i386/uaccess.h1
5 files changed, 911 insertions, 0 deletions
diff --git a/linux/dev/include/asm-i386/page.h b/linux/dev/include/asm-i386/page.h
new file mode 100644
index 0000000..be81848
--- /dev/null
+++ b/linux/dev/include/asm-i386/page.h
@@ -0,0 +1,59 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* Linux-compatible page-level definitions for the Mach glue layer.
+   PAGE_SIZE/PAGE_SHIFT/PAGE_MASK are used below but not defined here;
+   presumably they come from <mach/vm_param.h> -- confirm against that
+   header.  */
+#include <mach/vm_param.h>
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map..
+   NOTE(review): PAGE_OFFSET of 0 makes MAP_NR a direct address >> PAGE_SHIFT
+   mapping (no kernel-virtual offset applied here).  */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
diff --git a/linux/dev/include/asm-i386/smp.h b/linux/dev/include/asm-i386/smp.h
new file mode 100644
index 0000000..fabe01d
--- /dev/null
+++ b/linux/dev/include/asm-i386/smp.h
@@ -0,0 +1,8 @@
+#ifndef _I386_SMP_H
+#define _I386_SMP_H
+
+#include <machine/cpu_number.h>
+
+/* Map Linux's smp_processor_id() onto Mach's cpu_number().  */
+#define smp_processor_id() cpu_number()
+
+#endif /* _I386_SMP_H */
diff --git a/linux/dev/include/asm-i386/string.h b/linux/dev/include/asm-i386/string.h
new file mode 100644
index 0000000..f41ca5c
--- /dev/null
+++ b/linux/dev/include/asm-i386/string.h
@@ -0,0 +1,487 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ *
+ * Also, the byte strings actually work correctly. Forget
+ * the i486 routines for now as they may be broken..
+ */
+#if FIXED_486_STRING && (CPU == 486 || CPU == 586)
+#include <asm/string-486.h>
+#else
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strtok,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ * NO Copyright (C) 1991, 1992 Linus Torvalds,
+ * consider these trivial functions to be PD.
+ */
+
+#define __HAVE_ARCH_STRCPY
+/* Copy the NUL-terminated string src into dest (terminator included);
+   returns dest.  Byte-at-a-time lodsb/stosb loop that stops once the
+   NUL has been copied (testb %al,%al).  No bounds checking.  */
+static inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+	"cld\n"
+	"1:\tlodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2)
+	:"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+/* Classic strncpy semantics: copy at most count bytes of src to dest.
+   If src is shorter than count, the rest of dest is zero-padded (the
+   trailing "rep stosb" runs with %al holding the just-copied NUL).
+   If src is count bytes or longer, dest is NOT NUL-terminated.
+   Returns dest.  */
+static inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"cld\n"
+	"1:\tdecl %2\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"rep\n\t"
+	"stosb\n"
+	"2:"
+	: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+	:"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+/* Append src to dest; returns dest.  "repne scasb" with %al == 0 and
+   %ecx == 0xffffffff locates dest's terminating NUL, "decl %1" backs
+   %edi up onto it, then a strcpy-style loop copies src including its
+   NUL.  No bounds checking.  */
+static inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"cld\n\t"
+	"repne\n\t"
+	"scasb\n\t"
+	"decl %1\n"
+	"1:\tlodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+	: "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+/* Append at most count bytes of src to dest, then ALWAYS store a
+   terminating NUL ("xorl %2,%2; stosb"), so dest may grow by up to
+   count+1 bytes.  Returns dest.  The initial repne/scasb finds the
+   end of dest, as in strcat above.  */
+static inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+	"cld\n\t"
+	"repne\n\t"
+	"scasb\n\t"
+	"decl %1\n\t"
+	"movl %8,%3\n"
+	"1:\tdecl %3\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"stosb\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n"
+	"2:\txorl %2,%2\n\t"
+	"stosb"
+	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+	: "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+	: "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+/* Byte-wise compare of cs and ct, stopping at the first difference or
+   NUL.  Returns 0 when equal; otherwise "sbbl %eax,%eax; orb $1,%al"
+   converts the borrow from the last scasb into -1 or +1.  */
+static inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+	"cld\n"
+	"1:\tlodsb\n\t"
+	"scasb\n\t"
+	"jne 2f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"xorl %%eax,%%eax\n\t"
+	"jmp 3f\n"
+	"2:\tsbbl %%eax,%%eax\n\t"
+	"orb $1,%%al\n"
+	"3:"
+	:"=a" (__res), "=&S" (d0), "=&D" (d1)
+	:"1" (cs),"2" (ct));
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+/* Like strcmp, but compares at most count bytes; an exhausted count
+   ("decl %3; js 2f") is treated as equality (returns 0).  Otherwise
+   the same sbb/or trick yields -1 or +1.  */
+static inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+	"cld\n"
+	"1:\tdecl %3\n\t"
+	"js 2f\n\t"
+	"lodsb\n\t"
+	"scasb\n\t"
+	"jne 3f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n"
+	"2:\txorl %%eax,%%eax\n\t"
+	"jmp 4f\n"
+	"3:\tsbbl %%eax,%%eax\n\t"
+	"orb $1,%%al\n"
+	"4:"
+	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+	:"1" (cs),"2" (ct),"3" (count));
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+/* Return a pointer to the first occurrence of byte c in s, or NULL.
+   c is stashed in %ah for the compare; when the terminating NUL is
+   hit first, "movl $1,%1" followed by the shared "decl %0" produces
+   the NULL return.  On a match, %esi is one past the byte, so the
+   decl also corrects that case.  */
+static inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+	"cld\n\t"
+	"movb %%al,%%ah\n"
+	"1:\tlodsb\n\t"
+	"cmpb %%ah,%%al\n\t"
+	"je 2f\n\t"
+	"testb %%al,%%al\n\t"
+	"jne 1b\n\t"
+	"movl $1,%1\n"
+	"2:\tmovl %1,%0\n\t"
+	"decl %0"
+	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+/* Return a pointer to the LAST occurrence of byte c in s, or NULL.
+   The result register starts at 0 and is refreshed with
+   "leal -1(%esi)" on every match; the scan runs until the
+   terminating NUL.  */
+static inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+	"cld\n\t"
+	"movb %%al,%%ah\n"
+	"1:\tlodsb\n\t"
+	"cmpb %%ah,%%al\n\t"
+	"jne 2f\n\t"
+	"leal -1(%%esi),%0\n"
+	"2:\ttestb %%al,%%al\n\t"
+	"jne 1b"
+	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+/* Length of s excluding the NUL: "repne scasb" with %al == 0 and
+   %ecx == 0xffffffff scans to the terminator; not/dec of the residual
+   counter converts it to the string length.  */
+static inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+	"cld\n\t"
+	"repne\n\t"
+	"scasb\n\t"
+	"notl %0\n\t"
+	"decl %0"
+	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
+return __res;
+}
+
+/* Copy n bytes from from to to; returns to.  No overlap handling --
+   use memmove for that.  Moves n/4 dwords with "rep movsl", then the
+   remaining 0-3 bytes selected by testing bits 1 and 0 of n (%b4).  */
+static inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+	"cld\n\t"
+	"rep ; movsl\n\t"
+	"testb $2,%b4\n\t"
+	"je 1f\n\t"
+	"movsw\n"
+	"1:\ttestb $1,%b4\n\t"
+	"je 2f\n\t"
+	"movsb\n"
+	"2:"
+	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
+	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+	: "memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+/* Compile-time-sized copy: small/common sizes are expanded to direct
+   word/dword stores; anything else falls through to the COMMON asm
+   (bulk "rep movsl" plus a 0-3 byte tail chosen by n % 4).
+   NOTE(review): the switch relies on unaligned word/dword stores
+   being legal, which holds on i386.  */
+static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+	switch (n) {
+	case 0:
+		return to;
+	case 1:
+		*(unsigned char *)to = *(const unsigned char *)from;
+		return to;
+	case 2:
+		*(unsigned short *)to = *(const unsigned short *)from;
+		return to;
+	case 3:
+		*(unsigned short *)to = *(const unsigned short *)from;
+		*(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+		return to;
+	case 4:
+		*(unsigned long *)to = *(const unsigned long *)from;
+		return to;
+	case 6:	/* for Ethernet addresses */
+		*(unsigned long *)to = *(const unsigned long *)from;
+		*(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
+		return to;
+	case 8:
+		*(unsigned long *)to = *(const unsigned long *)from;
+		*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+		return to;
+	case 12:
+		*(unsigned long *)to = *(const unsigned long *)from;
+		*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+		*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+		return to;
+	case 16:
+		*(unsigned long *)to = *(const unsigned long *)from;
+		*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+		*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+		*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+		return to;
+	case 20:
+		*(unsigned long *)to = *(const unsigned long *)from;
+		*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+		*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+		*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+		*(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
+		return to;
+	}
+/* COMMON(x): "rep movsl" for n/4 dwords, then the tail instructions x.  */
+#define COMMON(x) \
+__asm__ __volatile__( \
+	"cld\n\t" \
+	"rep ; movsl" \
+	x \
+	: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
+	: "0" (n/4),"1" ((long) to),"2" ((long) from) \
+	: "memory");
+{
+	int d0, d1, d2;
+	switch (n % 4) {
+	case 0: COMMON(""); return to;
+	case 1: COMMON("\n\tmovsb"); return to;
+	case 2: COMMON("\n\tmovsw"); return to;
+	default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+	}
+}
+
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+/* memcpy dispatch: constant sizes are expanded via __constant_memcpy;
+   runtime sizes use __memcpy.  */
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+/* Overlap-safe copy of n bytes; returns dest.  Forward "rep movsb"
+   when dest < src; otherwise copies backwards with the direction
+   flag set ("std", starting from the last byte of each buffer) and
+   restores DF with "cld" afterwards.  */
+static inline void * memmove(void * dest,const void * src, size_t n)
+{
+int d0, d1, d2;
+if (dest<src)
+__asm__ __volatile__(
+	"cld\n\t"
+	"rep\n\t"
+	"movsb"
+	: "=&c" (d0), "=&S" (d1), "=&D" (d2)
+	:"0" (n),"1" (src),"2" (dest)
+	: "memory");
+else
+__asm__ __volatile__(
+	"std\n\t"
+	"rep\n\t"
+	"movsb\n\t"
+	"cld"
+	: "=&c" (d0), "=&S" (d1), "=&D" (d2)
+	:"0" (n),
+	 "1" (n-1+(const char *)src),
+	 "2" (n-1+(char *)dest)
+	:"memory");
+return dest;
+}
+
+/* memcmp is delegated to the compiler builtin.  */
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+/* Return a pointer to the first occurrence of byte c within the first
+   count bytes of cs, or NULL.  "repne scasb" leaves %edi one past the
+   match; the shared "decl" corrects it, and the "movl $1" path turns
+   a miss into NULL.  A zero count short-circuits to NULL.  */
+static inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+	return NULL;
+__asm__ __volatile__(
+	"cld\n\t"
+	"repne\n\t"
+	"scasb\n\t"
+	"je 1f\n\t"
+	"movl $1,%0\n"
+	"1:\tdecl %0"
+	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
+return __res;
+}
+
+/* Fill count bytes at s with byte c using "rep stosb"; returns s.
+   Generic fallback used when neither c nor count is constant.  */
+static inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+	"cld\n\t"
+	"rep\n\t"
+	"stosb"
+	: "=&c" (d0), "=&D" (d1)
+	:"a" (c),"1" (s),"0" (count)
+	:"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+/* Fill with a constant byte, runtime count.  c is already a 32-bit
+   pattern with the byte replicated into all four lanes (the memset
+   macro below passes 0x01010101 * byte), so "rep stosl" fills
+   count/4 dwords and the tail is handled by testing bits 1 and 0 of
+   count (%b3), as in __memcpy.  Returns s.  */
+static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+	"cld\n\t"
+	"rep ; stosl\n\t"
+	"testb $2,%b3\n\t"
+	"je 1f\n\t"
+	"stosw\n"
+	"1:\ttestb $1,%b3\n\t"
+	"je 2f\n\t"
+	"stosb\n"
+	"2:"
+	: "=&c" (d0), "=&D" (d1)
+	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+	:"memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+/* Return min(strlen(s), count).  Plain pointer walk (cmpb $0,(%0))
+   bounded by a decrementing counter; the final "subl %2,%0" turns
+   the end pointer back into a length.  */
+static inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+	"movl %2,%0\n\t"
+	"jmp 2f\n"
+	"1:\tcmpb $0,(%0)\n\t"
+	"je 3f\n\t"
+	"incl %0\n"
+	"2:\tdecl %1\n\t"
+	"cmpl $-1,%1\n\t"
+	"jne 1b\n"
+	"3:\tsubl %2,%0"
+	:"=a" (__res), "=&d" (d0)
+	:"c" (s),"1" (count));
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count is constant..
+ */
+/* Fully-constant memset: tiny sizes become direct stores of the
+   replicated pattern; larger sizes use COMMON ("rep stosl" for
+   count/4 dwords plus a stosw/stosb tail chosen by count % 4).
+   Returns s.  */
+static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+	switch (count) {
+	case 0:
+		return s;
+	case 1:
+		*(unsigned char *)s = pattern;
+		return s;
+	case 2:
+		*(unsigned short *)s = pattern;
+		return s;
+	case 3:
+		*(unsigned short *)s = pattern;
+		*(2+(unsigned char *)s) = pattern;
+		return s;
+	case 4:
+		*(unsigned long *)s = pattern;
+		return s;
+	}
+#define COMMON(x) \
+__asm__ __volatile__("cld\n\t" \
+	"rep ; stosl" \
+	x \
+	: "=&c" (d0), "=&D" (d1) \
+	: "a" (pattern),"0" (count/4),"1" ((long) s) \
+	: "memory")
+{
+	int d0, d1;
+	switch (count % 4) {
+	case 0: COMMON(""); return s;
+	case 1: COMMON("\n\tstosb"); return s;
+	case 2: COMMON("\n\tstosw"); return s;
+	default: COMMON("\n\tstosw\n\tstosb"); return s;
+	}
+}
+
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+/* memset dispatch: a constant fill byte is replicated into a 32-bit
+   pattern (0x01010101 * byte) so the fill can run a dword at a time;
+   dispatch then continues on whether count is also constant.  */
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+/* Unlike memchr, a miss returns addr+size (one past the area), not
+   NULL: "repnz scasb" exhausts %ecx, "jnz 1f" skips the dec, and the
+   post-scan %edi is returned as-is.  A hit decrements %edi back onto
+   the matching byte.  */
+static inline void * memscan(void * addr, int c, size_t size)
+{
+	if (!size)
+		return addr;
+	__asm__("cld\n"
+		"repnz; scasb\n"
+		"jnz 1f\n"
+		"dec %%edi\n"
+		"1:\n"
+		: "=D" (addr), "=c" (size)
+		: "0" (addr), "1" (size), "a" (c));
+	return addr;
+}
+
+#endif
+#endif
diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
new file mode 100644
index 0000000..5187c5e
--- /dev/null
+++ b/linux/dev/include/asm-i386/system.h
@@ -0,0 +1,356 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <i386/ipl.h> /* curr_ipl[], splx */
+#include <kern/cpu_number.h>
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ *   0 - null
+ *   1 - not used
+ *   2 - kernel code segment
+ *   3 - kernel data segment
+ *   4 - user code segment
+ *   5 - user data segment
+ * ...
+ *   8 - TSS #0
+ *   9 - LDT #0
+ *  10 - TSS #1
+ *  11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+/* Selector for task n's TSS/LDT: each task occupies two consecutive
+   GDT entries (2 * 8 bytes == n<<4), offset from the first TSS/LDT
+   slot (entry << 3 == selector).  */
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+/* store_TR(n): read the task register and convert the selector back
+   to a task number (inverse of _TSS).  */
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+	"subl %2,%%eax\n\t" \
+	"shrl $4,%%eax" \
+	:"=a" (n) \
+	:"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+/* NOTE(review): the trailing ';' inside the macro body means callers'
+   own ';' yields an empty statement -- harmless in the if-bodies
+   below, but a known quirk of this macro.  */
+#define loaddebug(tsk,register) \
+		__asm__("movl %0,%%edx\n\t" \
+			"movl %%edx,%%db" #register "\n\t" \
+			: /* no output */ \
+			:"m" (tsk->debugreg[register]) \
+			:"dx");
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+	/*
+	 *	Keep the lock depth straight. If we switch on an interrupt from
+	 *	kernel->user task we need to lose a depth, and if we switch the
+	 *	other way we need to gain a depth. Same layer switches come out
+	 *	the same.
+	 *
+	 *	We spot a switch in user mode because the kernel counter is the
+	 *	same as the interrupt counter depth. (We never switch during the
+	 *	message/invalidate IPI).
+	 *
+	 *	We fsave/fwait so that an exception goes off at the right time
+	 *	(as a call from the fsave or fwait in effect) rather than to
+	 *	the wrong process.
+	 */
+
+/* SMP context switch: save FPU state if this task used it, fix up the
+   kernel-lock depth bookkeeping, publish `next' in current_set[] for
+   this CPU (APIC ID extracted from apic_reg), then "ljmp" through the
+   next task's TSS selector to perform the hardware task switch.
+   Debug registers are reloaded afterwards if DR7 was active.  */
+#define switch_to(prev,next) do { \
+	cli();\
+	if(prev->flags&PF_USEDFPU) \
+	{ \
+		__asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
+		__asm__ __volatile__("fwait"); \
+		prev->flags&=~PF_USEDFPU; \
+	} \
+	prev->lock_depth=syscall_count; \
+	kernel_counter+=next->lock_depth-prev->lock_depth; \
+	syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+	"movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+	"movl 0x20(%%edx), %%edx\n\t" \
+	"shrl $22,%%edx\n\t" \
+	"and  $0x3C,%%edx\n\t" \
+	"movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+	"popl %%edx\n\t" \
+	"ljmp %0\n\t" \
+	"sti\n\t" \
+	: /* no output */ \
+	:"m" (*(((char *)&next->tss.tr)-4)), \
+	 "c" (next)); \
+	/* Now maybe reload the debug registers */ \
+	if(prev->debugreg[7]){ \
+		loaddebug(prev,0); \
+		loaddebug(prev,1); \
+		loaddebug(prev,2); \
+		loaddebug(prev,3); \
+		loaddebug(prev,6); \
+	} \
+} while (0)
+
+#else
+/* UP context switch: update current_set, "ljmp" through next's TSS
+   selector, and clear CR0.TS ("clts") if `next' was the last task to
+   use the FPU so it can use it without a trap.  */
+#define switch_to(prev,next) do { \
+__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
+	"ljmp %0\n\t" \
+	"cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+	"jne 1f\n\t" \
+	"clts\n" \
+	"1:" \
+	: /* no outputs */ \
+	:"m" (*(((char *)&next->tss.tr)-4)), \
+	 "r" (prev), "r" (next)); \
+	/* Now maybe reload the debug registers */ \
+	if(prev->debugreg[7]){ \
+		loaddebug(prev,0); \
+		loaddebug(prev,1); \
+		loaddebug(prev,2); \
+		loaddebug(prev,3); \
+		loaddebug(prev,6); \
+	} \
+} while (0)
+#endif
+
+/* Write a 32-bit base into a segment descriptor at addr: low word at
+   byte 2, byte 16-23 at offset 4, byte 24-31 at offset 7.  */
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%1\n\t" \
+	"movb %%dh,%2" \
+	: /* no output */ \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+	 "d" (base) \
+	:"dx")
+
+/* Write a 20-bit limit: low word at byte 0, high nibble merged into
+   the flags byte at offset 6 (its upper 4 bits are preserved).  */
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %1,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%1" \
+	: /* no output */ \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "d" (limit) \
+	:"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+/* Byte limit is converted to 4K-granularity pages: (limit-1)>>12.  */
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+/* Reassemble a descriptor's 32-bit base: byte 7 -> bits 24-31,
+   byte 4 -> bits 16-23, word at byte 2 -> bits 0-15 (inverse of
+   _set_base above).  */
+static inline unsigned long _get_base(char * addr)
+{
+	unsigned long __base;
+	__asm__("movb %3,%%dh\n\t"
+		"movb %2,%%dl\n\t"
+		"shll $16,%%edx\n\t"
+		"movw %1,%%dx"
+		:"=&d" (__base)
+		:"m" (*((addr)+2)),
+		 "m" (*((addr)+4)),
+		 "m" (*((addr)+7)));
+	return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+/* Segment limit via the "lsl" instruction; +1 converts the inclusive
+   limit to a size.  */
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+/* stts: set CR0.TS (bit 3) by read-modify-write of CR0, so the next
+   FPU instruction traps and lazy FPU switching can kick in.  */
+#define stts() \
+__asm__ __volatile__ ( \
+	"movl %%cr0,%%eax\n\t" \
+	"orl $8,%%eax\n\t" \
+	"movl %%eax,%%cr0" \
+	: /* no outputs */ \
+	: /* no inputs */ \
+	:"ax")
+
+
+/* Atomically exchange *ptr with x, returning the old value; size is
+   picked from the pointed-to type.  An x86 "xchg" with a memory
+   operand is implicitly locked, so no "lock" prefix is needed.  */
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+/* The oversized dummy type makes the "m" constraint cover the whole
+   object so the compiler cannot cache it in a register.  */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+/* NOTE(review): sizes other than 1/2/4 silently fall through and
+   return x unchanged -- no exchange happens.  */
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+	switch (size) {
+		case 1:
+			__asm__("xchgb %b0,%1"
+				:"=q" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 2:
+			__asm__("xchgw %w0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+		case 4:
+			__asm__("xchgl %0,%1"
+				:"=r" (x)
+				:"m" (*__xg(ptr)), "0" (x)
+				:"memory");
+			break;
+	}
+	return x;
+}
+
+/* mb() is only a compiler barrier here (no fence instruction).  */
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define __sti() __asm__ __volatile__ ("sti": : :"memory")
+#define __cli() __asm__ __volatile__ ("cli": : :"memory")
+/* Mach glue: fake an EFLAGS value from the current interrupt priority
+   level -- bit 9 (IF) is "set" only when this CPU is at ipl 0.  */
+#define __save_flags(x) (x = ((curr_ipl[cpu_number()] > 0) ? 0 : (1 << 9)))
+/* Restore by going to ipl 0 (interrupts on) or 7 -- presumably the
+   highest/blocking ipl; confirm against i386/ipl.h.  */
+#define __restore_flags(x) splx((x & (1 << 9)) ? 0 : 7)
+
+#ifdef __SMP__
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+
+#endif
+
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+/* Build an 8-byte IDT gate at gate_addr: selector KERNEL_CS, handler
+   address split across the two dwords, present bit + dpl + type packed
+   into the high word.  */
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+	"movw %2,%%dx\n\t" \
+	"movl %%eax,%0\n\t" \
+	"movl %%edx,%1" \
+	:"=m" (*((long *) (gate_addr))), \
+	 "=m" (*(1+(long *) (gate_addr))) \
+	:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+	 "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+	:"ax","dx")
+
+/* type 14 = 32-bit interrupt gate (IF cleared on entry), dpl 0 */
+#define set_intr_gate(n,addr) \
+	_set_gate(&idt[n],14,0,addr)
+
+/* type 15 = 32-bit trap gate (IF left alone), dpl 0 */
+#define set_trap_gate(n,addr) \
+	_set_gate(&idt[n],15,0,addr)
+
+/* trap gate callable from user mode (dpl 3) */
+#define set_system_gate(n,addr) \
+	_set_gate(&idt[n],15,3,addr)
+
+/* type 12 = call gate, dpl 3 */
+#define set_call_gate(a,addr) \
+	_set_gate(a,12,3,addr)
+
+/* Pack base/limit/type/dpl into a segment descriptor in C.  */
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+	*((gate_addr)+1) = ((base) & 0xff000000) | \
+		(((base) & 0x00ff0000)>>16) | \
+		((limit) & 0xf0000) | \
+		((dpl)<<13) | \
+		(0x00408000) | \
+		((type)<<8); \
+	*(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+		((limit) & 0x0ffff); }
+
+/* Build a TSS or LDT descriptor at n.  NOTE(review): addr+0xc0000000
+   presumably converts a kernel virtual address to the linear address
+   the descriptor needs -- confirm against Mach's i386 memory layout.  */
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+	"movw %%ax,%2\n\t" \
+	"rorl $16,%%eax\n\t" \
+	"movb %%al,%3\n\t" \
+	"movb $" type ",%4\n\t" \
+	"movb $0x00,%5\n\t" \
+	"movb %%ah,%6\n\t" \
+	"rorl $16,%%eax" \
+	: /* no output */ \
+	:"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+	 "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+	)
+
+/* 0x89 = available 32-bit TSS; 0x82 = LDT descriptor.  */
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+	_set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+/* Under MACH this feature is compiled out.  */
+#ifndef MACH
+#define HAVE_DISABLE_HLT
+#endif
+void disable_hlt(void);
+void enable_hlt(void);
+
+/* Read a model-specific register; "=A" binds the edx:eax pair into
+   the 64-bit return value.  */
+static __inline__ unsigned long long rdmsr(unsigned int msr)
+{
+	unsigned long long ret;
+	__asm__ __volatile__("rdmsr"
+			     : "=A" (ret)
+			     : "c" (msr));
+	return ret;
+}
+
+/* Write a model-specific register ("A" splits val into edx:eax).  */
+static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
+{
+	__asm__ __volatile__("wrmsr"
+			     : /* no Outputs */
+			     : "c" (msr), "A" (val));
+}
+
+
+/* Read the 64-bit time-stamp counter.  */
+static __inline__ unsigned long long rdtsc(void)
+{
+	unsigned long long ret;
+	__asm__ __volatile__("rdtsc"
+			     : "=A" (ret)
+			     : /* no inputs */);
+	return ret;
+}
+
+/* Read a performance-monitoring counter selected by %ecx.  */
+static __inline__ unsigned long long rdpmc(unsigned int counter)
+{
+	unsigned long long ret;
+	__asm__ __volatile__("rdpmc"
+			     : "=A" (ret)
+			     : "c" (counter));
+	return ret;
+}
+
+#endif
diff --git a/linux/dev/include/asm-i386/uaccess.h b/linux/dev/include/asm-i386/uaccess.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/asm-i386/uaccess.h
@@ -0,0 +1 @@
+/* Dummy file. */