path: root/patches/glibc
author     Pasha <pasha@member.fsf.org>  2024-02-29 19:30:30 +0000
committer  Pasha <pasha@member.fsf.org>  2024-02-29 19:30:30 +0000
commit     84d199451cf33734003c00c043a1480920f5563b (patch)
tree       4655f03b3637184bfb363b4e86d376756e85c9e3 /patches/glibc
download   cross-hurd-84d199451cf33734003c00c043a1480920f5563b.tar.gz
           cross-hurd-84d199451cf33734003c00c043a1480920f5563b.tar.bz2

initial commit
Diffstat (limited to 'patches/glibc')
-rw-r--r--  patches/glibc/local-clock_gettime_MONOTONIC.diff         133
-rw-r--r--  patches/glibc/local-static_pthread_setcancelstate.diff    87
-rw-r--r--  patches/glibc/submitted-AF_LINK.diff                       29
-rw-r--r--  patches/glibc/tg-bits_atomic.h_multiple_threads.diff     279
-rw-r--r--  patches/glibc/tg-mach-hurd-link.diff                       32
-rw-r--r--  patches/glibc/tg-unlockpt-chroot.diff                      22
-rw-r--r--  patches/glibc/unsubmitted-getaux_at_secure.diff            30
-rw-r--r--  patches/glibc/unsubmitted-prof-eintr.diff                  21
8 files changed, 633 insertions, 0 deletions
diff --git a/patches/glibc/local-clock_gettime_MONOTONIC.diff b/patches/glibc/local-clock_gettime_MONOTONIC.diff
new file mode 100644
index 0000000..3207a94
--- /dev/null
+++ b/patches/glibc/local-clock_gettime_MONOTONIC.diff
@@ -0,0 +1,133 @@
+Use the realtime clock for the monotonic clock. This is of course not a proper
+implementation (a real one is being done in Mach), but it at least permits
+fixing the iceweasel stack.
+
+vlc, however, doesn't build when _POSIX_CLOCK_SELECTION is enabled but
+_POSIX_TIMERS is not, and upstream refuses to fix that (see #765578), so
+disable the former.
+
+---
+ sysdeps/mach/hurd/bits/posix_opt.h | 2 +-
+ sysdeps/unix/clock_gettime.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+Index: glibc-2.27/sysdeps/mach/clock_gettime.c
+===================================================================
+--- glibc-2.27.orig/sysdeps/mach/clock_gettime.c
++++ glibc-2.27/sysdeps/mach/clock_gettime.c
+@@ -31,6 +31,10 @@ __clock_gettime (clockid_t clock_id, str
+ switch (clock_id) {
+
+ case CLOCK_REALTIME:
++ case CLOCK_MONOTONIC:
++ case CLOCK_MONOTONIC_RAW:
++ case CLOCK_REALTIME_COARSE:
++ case CLOCK_MONOTONIC_COARSE:
+ {
+ /* __host_get_time can only fail if passed an invalid host_t.
+ __mach_host_self could theoretically fail (producing an
+Index: glibc-2.27/rt/timer_create.c
+===================================================================
+--- glibc-2.27.orig/rt/timer_create.c
++++ glibc-2.27/rt/timer_create.c
+@@ -48,7 +48,7 @@ timer_create (clockid_t clock_id, struct
+ return -1;
+ }
+
+- if (clock_id != CLOCK_REALTIME)
++ if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_MONOTONIC && clock_id != CLOCK_MONOTONIC_RAW && clock_id != CLOCK_REALTIME_COARSE && clock_id != CLOCK_MONOTONIC_COARSE)
+ {
+ __set_errno (EINVAL);
+ return -1;
+Index: glibc-2.27/sysdeps/mach/hurd/bits/posix_opt.h
+===================================================================
+--- glibc-2.27.orig/sysdeps/mach/hurd/bits/posix_opt.h
++++ glibc-2.27/sysdeps/mach/hurd/bits/posix_opt.h
+@@ -163,10 +163,10 @@
+ #define _POSIX_THREAD_PROCESS_SHARED -1
+
+ /* The monotonic clock might be available. */
+-#define _POSIX_MONOTONIC_CLOCK 0
++#define _POSIX_MONOTONIC_CLOCK 200809L
+
+-/* The clock selection interfaces are available. */
+-#define _POSIX_CLOCK_SELECTION 200809L
++/* The clock selection interfaces are not really available yet. */
++#define _POSIX_CLOCK_SELECTION -1
+
+ /* Advisory information interfaces could be available in future. */
+ #define _POSIX_ADVISORY_INFO 0
+Index: glibc-upstream/sysdeps/posix/clock_getres.c
+===================================================================
+--- glibc-upstream.orig/sysdeps/posix/clock_getres.c
++++ glibc-upstream/sysdeps/posix/clock_getres.c
+@@ -52,6 +52,10 @@ __clock_getres (clockid_t clock_id, stru
+ switch (clock_id)
+ {
+ case CLOCK_REALTIME:
++ case CLOCK_MONOTONIC:
++ case CLOCK_MONOTONIC_RAW:
++ case CLOCK_REALTIME_COARSE:
++ case CLOCK_MONOTONIC_COARSE:
+ retval = realtime_getres (res);
+ break;
+
+--- ./sysdeps/mach/clock_nanosleep.c.original 2020-07-21 00:31:35.226113142 +0200
++++ ./sysdeps/mach/clock_nanosleep.c 2020-07-21 00:31:49.026185761 +0200
+@@ -62,7 +62,7 @@
+ __clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
+ struct timespec *rem)
+ {
+- if (clock_id != CLOCK_REALTIME
++ if ((clock_id != CLOCK_REALTIME && clock_id != CLOCK_MONOTONIC && clock_id != CLOCK_MONOTONIC_RAW && clock_id != CLOCK_REALTIME_COARSE && clock_id != CLOCK_MONOTONIC_COARSE)
+ || !valid_nanoseconds (req->tv_nsec)
+ || (flags != 0 && flags != TIMER_ABSTIME))
+ return EINVAL;
+Index: glibc-2.32/hurd/hurdlock.c
+===================================================================
+--- glibc-2.32.orig/hurd/hurdlock.c
++++ glibc-2.32/hurd/hurdlock.c
+@@ -47,7 +47,7 @@ int
+ __lll_abstimed_wait (void *ptr, int val,
+ const struct timespec *tsp, int flags, int clk)
+ {
+- if (clk != CLOCK_REALTIME)
++ if (clk != CLOCK_REALTIME && clk != CLOCK_MONOTONIC)
+ return EINVAL;
+
+ int mlsec = compute_reltime (tsp, clk);
+@@ -59,7 +59,7 @@ int
+ __lll_abstimed_wait_intr (void *ptr, int val,
+ const struct timespec *tsp, int flags, int clk)
+ {
+- if (clk != CLOCK_REALTIME)
++ if (clk != CLOCK_REALTIME && clk != CLOCK_MONOTONIC)
+ return EINVAL;
+
+ int mlsec = compute_reltime (tsp, clk);
+@@ -79,7 +79,7 @@ int
+ __lll_abstimed_xwait (void *ptr, int lo, int hi,
+ const struct timespec *tsp, int flags, int clk)
+ {
+- if (clk != CLOCK_REALTIME)
++ if (clk != CLOCK_REALTIME && clk != CLOCK_MONOTONIC)
+ return EINVAL;
+
+ int mlsec = compute_reltime (tsp, clk);
+@@ -91,7 +91,7 @@ int
+ __lll_abstimed_lock (void *ptr,
+ const struct timespec *tsp, int flags, int clk)
+ {
+- if (clk != CLOCK_REALTIME)
++ if (clk != CLOCK_REALTIME && clk != CLOCK_MONOTONIC)
+ return EINVAL;
+
+ if (__lll_trylock (ptr) == 0)
+@@ -177,7 +177,7 @@ __lll_robust_abstimed_lock (void *ptr,
+ int wait_time = 25;
+ unsigned int val;
+
+- if (clk != CLOCK_REALTIME)
++ if (clk != CLOCK_REALTIME && clk != CLOCK_MONOTONIC)
+ return EINVAL;
+
+ while (1)
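
With the clock_gettime and clock_getres cases above in place, CLOCK_MONOTONIC
becomes usable on Hurd, though it is only an alias for the realtime clock and
can therefore jump when the system time is changed. A minimal check, assuming
nothing beyond the standard POSIX interface:

    /* Query the (realtime-backed) monotonic clock and print it.  */
    #include <stdio.h>
    #include <time.h>

    int
    main (void)
    {
      struct timespec ts;
      if (clock_gettime (CLOCK_MONOTONIC, &ts) != 0)
        {
          perror ("clock_gettime (CLOCK_MONOTONIC)");
          return 1;
        }
      printf ("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
      return 0;
    }
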
diff --git a/patches/glibc/local-static_pthread_setcancelstate.diff b/patches/glibc/local-static_pthread_setcancelstate.diff
new file mode 100644
index 0000000..18684a0
--- /dev/null
+++ b/patches/glibc/local-static_pthread_setcancelstate.diff
@@ -0,0 +1,87 @@
+Since the libpthread functions moved into libc, glibc dropped the use of
+__libc_ptf_call. But htl hasn't made that move yet, so we have to keep using
+__libc_ptf_call there for now.
+
+Index: glibc-2.36/misc/error.c
+===================================================================
+--- glibc-2.36.orig/misc/error.c
++++ glibc-2.36/misc/error.c
+@@ -240,7 +240,8 @@ __error_internal (int status, int errnum
+ /* We do not want this call to be cut short by a thread
+ cancellation. Therefore disable cancellation for now. */
+ int state = PTHREAD_CANCEL_ENABLE;
+- __pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &state);
++ __libc_ptf_call (__pthread_setcancelstate,
++ (PTHREAD_CANCEL_DISABLE, &state), 0);
+ #endif
+
+ flush_stdout ();
+@@ -262,7 +263,7 @@ __error_internal (int status, int errnum
+
+ #ifdef _LIBC
+ _IO_funlockfile (stderr);
+- __pthread_setcancelstate (state, NULL);
++ __libc_ptf_call (__pthread_setcancelstate, (state, NULL), 0);
+ #endif
+ }
+
+@@ -306,7 +307,9 @@ __error_at_line_internal (int status, in
+ /* We do not want this call to be cut short by a thread
+ cancellation. Therefore disable cancellation for now. */
+ int state = PTHREAD_CANCEL_ENABLE;
+- __pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &state);
++ __libc_ptf_call (__pthread_setcancelstate,
++ (PTHREAD_CANCEL_DISABLE, &state),
++ 0);
+ #endif
+
+ flush_stdout ();
+@@ -336,7 +339,7 @@ __error_at_line_internal (int status, in
+
+ #ifdef _LIBC
+ _IO_funlockfile (stderr);
+- __pthread_setcancelstate (state, NULL);
++ __libc_ptf_call (__pthread_setcancelstate, (state, NULL), 0);
+ #endif
+ }
+
+Index: glibc-2.36/libio/iopopen.c
+===================================================================
+--- glibc-2.36.orig/libio/iopopen.c
++++ glibc-2.36/libio/iopopen.c
+@@ -281,9 +281,10 @@ _IO_new_proc_close (FILE *fp)
+ do
+ {
+ int state;
+- __pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &state);
++ __libc_ptf_call (__pthread_setcancelstate,
++ (PTHREAD_CANCEL_DISABLE, &state), 0);
+ wait_pid = __waitpid (((_IO_proc_file *) fp)->pid, &wstatus, 0);
+- __pthread_setcancelstate (state, NULL);
++ __libc_ptf_call (__pthread_setcancelstate, (state, NULL), 0);
+ }
+ while (wait_pid == -1 && errno == EINTR);
+ if (wait_pid == -1)
+Index: glibc-2.36/stdlib/fmtmsg.c
+===================================================================
+--- glibc-2.36.orig/stdlib/fmtmsg.c
++++ glibc-2.36/stdlib/fmtmsg.c
+@@ -124,7 +124,8 @@ fmtmsg (long int classification, const c
+ /* We do not want this call to be cut short by a thread
+ cancellation. Therefore disable cancellation for now. */
+ int state = PTHREAD_CANCEL_ENABLE;
+- __pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &state);
++ __libc_ptf_call (__pthread_setcancelstate,
++ (PTHREAD_CANCEL_DISABLE, &state), 0);
+
+ __libc_lock_lock (lock);
+
+@@ -193,7 +194,7 @@ fmtmsg (long int classification, const c
+
+ __libc_lock_unlock (lock);
+
+- __pthread_setcancelstate (state, NULL);
++ __libc_ptf_call (__pthread_setcancelstate, (state, NULL), 0);
+
+ return result;
+ }
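
For reference, __libc_ptf_call is glibc's internal helper for calling a
pthread function only when the threading code is actually linked in, and
falling back to a constant result otherwise. The macro below is only a rough
sketch of that idea under the assumption of a weak symbol, not glibc's real
definition (ptf_call_sketch and example are made-up names):

    #include <pthread.h>
    #include <stddef.h>

    /* Weak reference: NULL when no threading implementation is linked in.  */
    extern int __pthread_setcancelstate (int, int *) __attribute__ ((weak));

    /* Call FUNC ARGS if FUNC is available, otherwise yield ELSE.  */
    #define ptf_call_sketch(FUNC, ARGS, ELSE) \
      ((FUNC) != NULL ? (FUNC) ARGS : (ELSE))

    void
    example (void)
    {
      int state = PTHREAD_CANCEL_ENABLE;
      ptf_call_sketch (__pthread_setcancelstate,
                       (PTHREAD_CANCEL_DISABLE, &state), 0);
      /* ... work that must not be cut short by cancellation ...  */
      ptf_call_sketch (__pthread_setcancelstate, (state, NULL), 0);
    }
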
diff --git a/patches/glibc/submitted-AF_LINK.diff b/patches/glibc/submitted-AF_LINK.diff
new file mode 100644
index 0000000..e95a987
--- /dev/null
+++ b/patches/glibc/submitted-AF_LINK.diff
@@ -0,0 +1,29 @@
+Hurd: comment PF_LINK/AF_LINK defines
+
+Comment out the PF_LINK and AF_LINK defines, since they are usually associated
+with struct sockaddr_dl, which is not available on Hurd.
+
+2012-06-22 Pino Toscano <toscano.pino@tiscali.it>
+
+ * sysdeps/mach/hurd/bits/socket.h (PF_LINK): Comment out.
+ (AF_LINK): Likewise.
+--- a/sysdeps/mach/hurd/bits/socket.h
++++ b/sysdeps/mach/hurd/bits/socket.h
+@@ -97,7 +97,7 @@ enum __socket_type
+ #define PF_HYLINK 15 /* NSC Hyperchannel protocol. */
+ #define PF_APPLETALK 16 /* Don't use this. */
+ #define PF_ROUTE 17 /* Internal Routing Protocol. */
+-#define PF_LINK 18 /* Link layer interface. */
++/* #define PF_LINK 18 Link layer interface. */
+ #define PF_XTP 19 /* eXpress Transfer Protocol (no AF). */
+ #define PF_COIP 20 /* Connection-oriented IP, aka ST II. */
+ #define PF_CNT 21 /* Computer Network Technology. */
+@@ -130,7 +130,7 @@ enum __socket_type
+ #define AF_HYLINK PF_HYLINK
+ #define AF_APPLETALK PF_APPLETALK
+ #define AF_ROUTE PF_ROUTE
+-#define AF_LINK PF_LINK
++/* #define AF_LINK PF_LINK */
+ #ifdef __USE_MISC
+ # define pseudo_AF_XTP PF_XTP
+ #endif
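
Code that wants to stay portable across this change should test for the macro
rather than assume BSD-style link-layer sockets exist; a small sketch:

    #include <stdio.h>
    #include <sys/socket.h>

    int
    main (void)
    {
    #ifdef AF_LINK
      printf ("AF_LINK = %d; struct sockaddr_dl is expected to exist\n",
              AF_LINK);
    #else
      puts ("AF_LINK is not defined; fall back to getifaddrs/ioctl interfaces");
    #endif
      return 0;
    }
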
diff --git a/patches/glibc/tg-bits_atomic.h_multiple_threads.diff b/patches/glibc/tg-bits_atomic.h_multiple_threads.diff
new file mode 100644
index 0000000..bc52b29
--- /dev/null
+++ b/patches/glibc/tg-bits_atomic.h_multiple_threads.diff
@@ -0,0 +1,279 @@
+From: Thomas Schwinge <thomas@schwinge.name>
+Subject: [PATCH] bits_atomic.h_multiple_threads
+
+TODO. bits/atomic.h for GNU Hurd.
+
+Source: Debian, eglibc-2.10/debian/patches/hurd-i386/local-atomic-no-multiple_threads.diff, r3536.
+Author: Samuel Thibault <samuel.thibault@ens-lyon.org>
+
+We always at least start the sigthread anyway. For now, let's avoid forking
+the file (which would mean having to maintain it).
+
+Need to override sysdeps/i386/i486/bits/atomic.h to remove Linuxisms.
+
+---
+ sysdeps/i386/atomic-machine.h | 107 +++++++++++++++---------------------------
+ 1 file changed, 37 insertions(+), 70 deletions(-)
+
+Index: glibc-2.23/sysdeps/x86/atomic-machine.h
+===================================================================
+--- glibc-2.23.orig/sysdeps/x86/atomic-machine.h
++++ glibc-2.23/sysdeps/x86/atomic-machine.h
+@@ -66,35 +66,26 @@ typedef uintmax_t uatomic_max_t;
+
+ #define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+- __asm __volatile ("cmpl $0, %%" SEG_REG ":%P5\n\t" \
+- "je 0f\n\t" \
+- "lock\n" \
+- "0:\tcmpxchgb %b2, %1" \
++ __asm __volatile ("lock\n" \
++ "\tcmpxchgb %b2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+- : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval)); \
+ ret; })
+
+ #define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+- __asm __volatile ("cmpl $0, %%" SEG_REG ":%P5\n\t" \
+- "je 0f\n\t" \
+- "lock\n" \
+- "0:\tcmpxchgw %w2, %1" \
++ __asm __volatile ("lock\n" \
++ "\tcmpxchgw %w2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+- : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval)); \
+ ret; })
+
+ #define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ __typeof (*mem) ret; \
+- __asm __volatile ("cmpl $0, %%" SEG_REG ":%P5\n\t" \
+- "je 0f\n\t" \
+- "lock\n" \
+- "0:\tcmpxchgl %2, %1" \
++ __asm __volatile ("lock\n" \
++ "\tcmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (*mem) \
+- : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : BR_CONSTRAINT (newval), "m" (*mem), "0" (oldval)); \
+ ret; })
+
+ #ifdef __x86_64__
+@@ -210,24 +195,20 @@ typedef uintmax_t uatomic_max_t;
+ if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "xaddb %b0, %1" \
+ : "=q" (__result), "=m" (*mem) \
+- : "0" (__addval), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "0" (__addval), "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "xaddw %w0, %1" \
+ : "=r" (__result), "=m" (*mem) \
+- : "0" (__addval), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "0" (__addval), "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "xaddl %0, %1" \
+ : "=r" (__result), "=m" (*mem) \
+- : "0" (__addval), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "0" (__addval), "m" (*mem)); \
+ else if (__HAVE_64B_ATOMICS) \
+ __asm __volatile (lock "xaddq %q0, %1" \
+ : "=r" (__result), "=m" (*mem) \
+ : "0" ((int64_t) cast_to_integer (__addval)), \
+- "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ "m" (*mem)); \
+ else \
+ __result = do_exchange_and_add_val_64_acq (pfx, (mem), __addval); \
+ __result; })
+@@ -238,7 +220,7 @@ typedef uintmax_t uatomic_max_t;
+ __sync_fetch_and_add (mem, value)
+
+ #define __arch_exchange_and_add_cprefix \
+- "cmpl $0, %%" SEG_REG ":%P4\n\tje 0f\n\tlock\n0:\t"
++ "lock\n\t"
+
+ #define catomic_exchange_and_add(mem, value) \
+ __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \
+@@ -254,24 +236,20 @@ typedef uintmax_t uatomic_max_t;
+ else if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "addb %b1, %0" \
+ : "=m" (*mem) \
+- : IBR_CONSTRAINT (value), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : IBR_CONSTRAINT (value), "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "addw %w1, %0" \
+ : "=m" (*mem) \
+- : "ir" (value), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (value), "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "addl %1, %0" \
+ : "=m" (*mem) \
+- : "ir" (value), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (value), "m" (*mem)); \
+ else if (__HAVE_64B_ATOMICS) \
+ __asm __volatile (lock "addq %q1, %0" \
+ : "=m" (*mem) \
+ : "ir" ((int64_t) cast_to_integer (value)), \
+- "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ "m" (*mem)); \
+ else \
+ do_add_val_64_acq (apfx, (mem), (value)); \
+ } while (0)
+@@ -283,7 +262,7 @@ typedef uintmax_t uatomic_max_t;
+ __arch_add_body (LOCK_PREFIX, atomic, __arch, mem, value)
+
+ #define __arch_add_cprefix \
+- "cmpl $0, %%" SEG_REG ":%P3\n\tje 0f\n\tlock\n0:\t"
++ "lock\n\t"
+
+ #define catomic_add(mem, value) \
+ __arch_add_body (__arch_add_cprefix, atomic, __arch_c, mem, value)
+@@ -332,23 +311,19 @@ typedef uintmax_t uatomic_max_t;
+ if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "incb %b0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "incw %w0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "incl %0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else if (__HAVE_64B_ATOMICS) \
+ __asm __volatile (lock "incq %q0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else \
+ do_add_val_64_acq (pfx, mem, 1); \
+ } while (0)
+@@ -359,7 +335,7 @@ typedef uintmax_t uatomic_max_t;
+ #define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)
+
+ #define __arch_increment_cprefix \
+- "cmpl $0, %%" SEG_REG ":%P2\n\tje 0f\n\tlock\n0:\t"
++ "lock\n\t"
+
+ #define catomic_increment(mem) \
+ __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)
+@@ -389,23 +365,19 @@ typedef uintmax_t uatomic_max_t;
+ if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "decb %b0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "decw %w0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "decl %0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "m" (*mem)); \
+ else if (__HAVE_64B_ATOMICS) \
+ __asm __volatile (lock "decq %q0" \
+ : "=m" (*mem) \
+- : "m" (*mem), \
++ : "m" (*mem)); \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
+ else \
+ do_add_val_64_acq (pfx, mem, -1); \
+ } while (0)
+@@ -416,7 +389,7 @@ typedef uintmax_t uatomic_max_t;
+ #define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)
+
+ #define __arch_decrement_cprefix \
+- "cmpl $0, %%" SEG_REG ":%P2\n\tje 0f\n\tlock\n0:\t"
++ "lock\n\t"
+
+ #define catomic_decrement(mem) \
+ __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)
+@@ -487,29 +460,25 @@ typedef uintmax_t uatomic_max_t;
+ if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "andb %b1, %0" \
+ : "=m" (*mem) \
+- : IBR_CONSTRAINT (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : IBR_CONSTRAINT (mask), "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "andw %w1, %0" \
+ : "=m" (*mem) \
+- : "ir" (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (mask), "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "andl %1, %0" \
+ : "=m" (*mem) \
+- : "ir" (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (mask), "m" (*mem)); \
+ else if (__HAVE_64B_ATOMICS) \
+ __asm __volatile (lock "andq %q1, %0" \
+ : "=m" (*mem) \
+- : "ir" (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (mask), "m" (*mem)); \
+ else \
+ __atomic_link_error (); \
+ } while (0)
+
+ #define __arch_cprefix \
+- "cmpl $0, %%" SEG_REG ":%P3\n\tje 0f\n\tlock\n0:\t"
++ "lock\n\t"
+
+ #define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)
+
+@@ -516,23 +486,19 @@ typedef uintmax_t uatomic_max_t;
+ if (sizeof (*mem) == 1) \
+ __asm __volatile (lock "orb %b1, %0" \
+ : "=m" (*mem) \
+- : IBR_CONSTRAINT (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : IBR_CONSTRAINT (mask), "m" (*mem)); \
+ else if (sizeof (*mem) == 2) \
+ __asm __volatile (lock "orw %w1, %0" \
+ : "=m" (*mem) \
+- : "ir" (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (mask), "m" (*mem)); \
+ else if (sizeof (*mem) == 4) \
+ __asm __volatile (lock "orl %1, %0" \
+ : "=m" (*mem) \
+- : "ir" (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (mask), "m" (*mem)); \
+ else if (__HAVE_64B_ATOMICS) \
+ __asm __volatile (lock "orq %q1, %0" \
+ : "=m" (*mem) \
+- : "ir" (mask), "m" (*mem), \
+- "i" (offsetof (tcbhead_t, multiple_threads))); \
++ : "ir" (mask), "m" (*mem)); \
+ else \
+ __atomic_link_error (); \
+ } while (0)
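
The removed "cmpl $0, %%SEG_REG:..." / "je 0f" sequences are the Linux-side
optimization that branches around the lock prefix when tcbhead_t's
multiple_threads field is zero (single-threaded process); since a Hurd process
always runs the signal thread as well, the patch emits the locked form
unconditionally. A stand-alone sketch of what the patched catomic_increment
then boils down to for the 4-byte case (illustrative only, not the actual
macro expansion):

    /* Always-locked increment, as after this patch (4-byte case).  */
    static inline void
    catomic_increment_sketch (int *mem)
    {
      __asm __volatile ("lock; incl %0" : "+m" (*mem));
    }
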
diff --git a/patches/glibc/tg-mach-hurd-link.diff b/patches/glibc/tg-mach-hurd-link.diff
new file mode 100644
index 0000000..6ee98ed
--- /dev/null
+++ b/patches/glibc/tg-mach-hurd-link.diff
@@ -0,0 +1,32 @@
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Subject: [PATCH] Add -lmachuser -lhurduser to libc.so on GNU/Hurd.
+
+http://lists.gnu.org/archive/html/bug-hurd/2011-03/msg00112.html
+
+2011-03-29 Samuel Thibault <samuel.thibault@ens-lyon.org>
+
+ * Makerules ($(inst_libdir)/libc.so): Add -lmachuser -lhurduser to
+ libc.so on GNU/Hurd.
+
+It's still unclear what we want to aim for.
+
+---
+ Makerules | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/Makerules b/Makerules
+index 53eabfaba8..b0f5e1b3a0 100644
+--- a/Makerules
++++ b/Makerules
+@@ -1066,6 +1066,9 @@ $(inst_libdir)/libc.so: $(common-objpfx)format.lds \
+ '$(libdir)/$(patsubst %,$(libtype.oS),$(libprefix)$(libc-name))'\
+ ' AS_NEEDED (' $(rtlddir)/$(rtld-installed-name) ') )' \
+ ) > $@.new
++ifeq ($(patsubst gnu%,,$(config-os)),)
++ echo 'INPUT ( AS_NEEDED ( -lmachuser -lhurduser ) )' >> $@.new
++endif
+ mv -f $@.new $@
+
+ endif
+--
+tg: (7bb5f8a836..) t/mach-hurd-link (depends on: baseline)
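
On GNU systems the installed $(inst_libdir)/libc.so is a small linker script
rather than an ELF object, so the added echo simply appends one more INPUT
line to it. Assuming illustrative sonames and paths (the real ones depend on
the configuration), the generated script would then read roughly:

    /* GNU ld script (illustrative content) */
    GROUP ( /lib/libc.so.0.3 /lib/libc_nonshared.a  AS_NEEDED ( /lib/ld.so.1 ) )
    INPUT ( AS_NEEDED ( -lmachuser -lhurduser ) )

so that programs linking only against -lc still pull in the Mach and Hurd RPC
stub libraries when they need them.
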
diff --git a/patches/glibc/tg-unlockpt-chroot.diff b/patches/glibc/tg-unlockpt-chroot.diff
new file mode 100644
index 0000000..a8f0d17
--- /dev/null
+++ b/patches/glibc/tg-unlockpt-chroot.diff
@@ -0,0 +1,22 @@
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Subject: [PATCH] Fix pty path in chroot
+
+When using a chroot, for example, the pty path is not so short.
+
+---
+ sysdeps/unix/bsd/unlockpt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: eglibc-2.16/sysdeps/unix/bsd/unlockpt.c
+===================================================================
+--- eglibc-2.16.orig/sysdeps/unix/bsd/unlockpt.c 2012-07-23 00:23:14.000000000 +0200
++++ eglibc-2.16/sysdeps/unix/bsd/unlockpt.c 2012-07-23 00:24:55.000000000 +0200
+@@ -27,7 +27,7 @@
+ int
+ unlockpt (int fd)
+ {
+- char buf[sizeof (_PATH_TTY) + 2];
++ char buf[1024]; /* XXX */
+
+ /* BSD doesn't have a lock, but it does have `revoke'. */
+ if (__ptsname_r (fd, buf, sizeof (buf)))
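
The old buffer, sizeof (_PATH_TTY) + 2 bytes, only fits names of the form
/dev/ttyXX; under a chroot __ptsname_r can return something longer, hence the
(admittedly crude) 1024-byte buffer. The call pattern being fixed, seen from
an application (a sketch; error handling kept minimal):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>

    int
    main (void)
    {
      char name[1024];          /* generous, like the patched unlockpt */
      int master = posix_openpt (O_RDWR | O_NOCTTY);

      if (master < 0
          || grantpt (master) != 0
          || unlockpt (master) != 0
          || ptsname_r (master, name, sizeof name) != 0)
        return 1;

      printf ("slave pty: %s\n", name);
      return 0;
    }
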
diff --git a/patches/glibc/unsubmitted-getaux_at_secure.diff b/patches/glibc/unsubmitted-getaux_at_secure.diff
new file mode 100644
index 0000000..0ecd5f7
--- /dev/null
+++ b/patches/glibc/unsubmitted-getaux_at_secure.diff
@@ -0,0 +1,30 @@
+FIXME: sysdeps/mach/hurd/i386/init-first.c should instead pass an auxv
+to __libc_start_main
+
+Index: glibc-2.33/misc/getauxval.c
+===================================================================
+--- glibc-2.33.orig/misc/getauxval.c
++++ glibc-2.33/misc/getauxval.c
+@@ -19,6 +19,7 @@
+ #include <errno.h>
+ #include <ldsodefs.h>
+ #include <stdbool.h>
++#include <unistd.h>
+
+ bool
+ __getauxval2 (unsigned long int type, unsigned long int *result)
+@@ -27,6 +28,14 @@ __getauxval2 (unsigned long int type, un
+ ElfW(auxv_t) *p;
+ #endif
+
++#ifdef AT_SECURE
++ if (type == AT_SECURE)
++ {
++ *result = __libc_enable_secure;
++ return true;
++ }
++#endif
++
+ if (type == AT_HWCAP)
+ {
+ *result = GLRO(dl_hwcap);
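
With this in place, getauxval (AT_SECURE) on Hurd is answered from
__libc_enable_secure rather than from a real auxv entry (which init-first.c
does not pass yet). A minimal caller, assuming only the public sys/auxv.h
interface:

    #include <stdio.h>
    #include <sys/auxv.h>

    int
    main (void)
    {
      /* Non-zero when the process is set-uid/set-gid ("secure").  */
      printf ("AT_SECURE = %lu\n", getauxval (AT_SECURE));
      return 0;
    }
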
diff --git a/patches/glibc/unsubmitted-prof-eintr.diff b/patches/glibc/unsubmitted-prof-eintr.diff
new file mode 100644
index 0000000..1b0808c
--- /dev/null
+++ b/patches/glibc/unsubmitted-prof-eintr.diff
@@ -0,0 +1,21 @@
+When profiling ext2fs, the shutdown stops all RPCs, which interrupts this
+open, so we have to retry here.
+
+TODO: is open really supposed to expose such an EINTR? Should the generic gmon
+code loop around EINTR (since signals may indeed be happening)?
+
+diff --git a/gmon/gmon.c b/gmon/gmon.c
+index 6439ed1caa..507ad0c9fc 100644
+--- a/gmon/gmon.c
++++ b/gmon/gmon.c
+@@ -390,8 +390,10 @@ write_gmon (void)
+
+ if (fd == -1)
+ {
++ do
+ fd = __open_nocancel ("gmon.out", O_CREAT | O_TRUNC | O_WRONLY
+ | O_NOFOLLOW | O_CLOEXEC, 0666);
++ while (fd < 0 && errno == EINTR);
+ if (fd < 0)
+ {
+ char buf[300];
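
The same retry idiom, written out as ordinary application code rather than the
__open_nocancel call used inside gmon (a sketch; open_retry is a made-up
helper name):

    #include <errno.h>
    #include <fcntl.h>

    /* Restart open () as long as it is interrupted by a signal.  */
    int
    open_retry (const char *path, int flags, mode_t mode)
    {
      int fd;
      do
        fd = open (path, flags, mode);
      while (fd < 0 && errno == EINTR);
      return fd;
    }
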