[maemo-commits] r14937 - in projects/haf/trunk/glib: . glib

From: subversion at stage.maemo.org
Date: Mon Dec 10 14:55:42 EET 2007
Author: xan
Date: 2007-12-10 14:55:36 +0200 (Mon, 10 Dec 2007)
New Revision: 14937

Modified:
   projects/haf/trunk/glib/configure.in
   projects/haf/trunk/glib/glib/gatomic.c
Log:
Atomic operations for ARM.

Part of bug NB#76856

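The patch gives configure three ARM strategies to choose from: a swp-based spinlock on pre-ARMv6 cores (G_ATOMIC_ARM < 6), ldrex/strex sequences on ARMv6 and newer (G_ATOMIC_ARM == 6), and, on ARM Linux, the cmpxchg user helper that the kernel maps at a fixed address (G_ATOMIC_ARM_LINUX). Callers keep using the ordinary GLib atomic API in every case; the sketch below shows typical caller-side use (the SharedBlob type and its ref/unref helpers are illustrative only, not part of this patch):

#include <glib.h>

typedef struct {
  gint     ref_count;   /* touched only through g_atomic_* calls */
  gpointer payload;
} SharedBlob;            /* illustrative type, not from the patch */

/* Take a reference: a plain atomic increment. */
static void
shared_blob_ref (SharedBlob *blob)
{
  g_atomic_int_add (&blob->ref_count, 1);
}

/* Drop a reference; an old value of 1 means we held the last one. */
static void
shared_blob_unref (SharedBlob *blob)
{
  if (g_atomic_int_exchange_and_add (&blob->ref_count, -1) == 1)
    g_free (blob);
}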

Modified: projects/haf/trunk/glib/configure.in
===================================================================
--- projects/haf/trunk/glib/configure.in	2007-12-10 12:53:55 UTC (rev 14936)
+++ projects/haf/trunk/glib/configure.in	2007-12-10 12:55:36 UTC (rev 14937)
@@ -2089,6 +2089,35 @@
 		   	 [s390 atomic implementation])
       glib_memory_barrier_needed=no
       ;;	
+    arm*)
+      AC_MSG_RESULT([arm])
+      AC_MSG_CHECKING(arm atomic operations type)
+      if test x$glib_os_linux != xyes; then
+        AC_MSG_RESULT(inline asm)
+        glib_save_CFLAGS=$CFLAGS
+        AC_MSG_CHECKING(architecture supports ARMv6 instructions)
+          cp $srcdir/armv6-test.c conftest.$ac_ext
+          CFLAGS="$CFLAGS -march=armv6"
+          AC_TRY_RUN(ac_compile,
+            [AC_DEFINE_UNQUOTED(G_ATOMIC_ARM, 6,
+                                [armv6 atomic implementation])
+             glib_memory_barrier_needed=yes
+             add_flags="-march=armv6"
+             AC_MSG_RESULT(armv6)],
+            [AC_DEFINE_UNQUOTED(G_ATOMIC_ARM, 1,
+                                [arm atomic implementation])
+             glib_memory_barrier_needed=no
+             add_flags=""
+             AC_MSG_RESULT(pre-armv6)])
+        CFLAGS=$glib_save_CFLAGS
+        CFLAGS="$CFLAGS $add_flags"
+      else
+        AC_MSG_RESULT(kernel helper)
+        AC_DEFINE_UNQUOTED(G_ATOMIC_ARM_LINUX, 1,
+                           [special arm linux implementation])
+        glib_memory_barrier_needed=yes
+      fi
+      ;;
     *)
       AC_MSG_RESULT([none])
       glib_memory_barrier_needed=yes

Modified: projects/haf/trunk/glib/glib/gatomic.c
===================================================================
--- projects/haf/trunk/glib/glib/gatomic.c	2007-12-10 12:53:55 UTC (rev 14936)
+++ projects/haf/trunk/glib/glib/gatomic.c	2007-12-10 12:55:36 UTC (rev 14937)
@@ -3,6 +3,7 @@
  *
  * g_atomic_*: atomic operations.
  * Copyright (C) 2003 Sebastian Wilhelmi
+ * Copyright (C) 2007 Nokia Corporation
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -19,9 +20,13 @@
  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  * Boston, MA 02111-1307, USA.
  */
- 
+
 #include "config.h"
 
+#if defined (G_ATOMIC_ARM)
+#include <sched.h>
+#endif
+
 #include "glib.h"
 #include "gthreadprivate.h"
 #include "galias.h"
@@ -482,7 +487,282 @@
 #  else /* What's that */
 #    error "Your system has an unsupported pointer size"
 #  endif /* GLIB_SIZEOF_VOID_P */
-# else /* !G_ATOMIC_IA64 */
+# elif defined (G_ATOMIC_ARM)
+#  if (G_ATOMIC_ARM < 6)
+static volatile int atomic_spin = 0;
+
+static int atomic_spin_trylock (void)
+{
+  int result;
+
+  asm volatile (
+    "swp %0, %1, [%2]\n"
+    : "=&r,&r" (result)
+    : "r,0" (1), "r,r" (&atomic_spin)
+    : "memory");
+  if (result == 0)
+    return 0;
+  else
+    return -1;
+}
+
+static void atomic_spin_lock (void)
+{
+  while (atomic_spin_trylock())
+    sched_yield();
+}
+
+static void atomic_spin_unlock (void)
+{
+  atomic_spin = 0;
+}
+
+gint
+g_atomic_int_exchange_and_add (volatile gint *atomic, 
+			       gint           val)
+{
+  gint result;
+ 
+  atomic_spin_lock();  
+  result = *atomic;
+  *atomic += val;
+  atomic_spin_unlock();
+
+  return result;
+}
+
+void
+g_atomic_int_add (volatile gint *atomic,
+		  gint           val)
+{
+  atomic_spin_lock();
+  *atomic += val;
+  atomic_spin_unlock();
+}
+
+gboolean
+g_atomic_int_compare_and_exchange (volatile gint *atomic, 
+				   gint           oldval, 
+				   gint           newval)
+{
+  gboolean result;
+
+  atomic_spin_lock();
+  if (*atomic == oldval)
+    {
+      result = TRUE;
+      *atomic = newval;
+    }
+  else
+    result = FALSE;
+  atomic_spin_unlock();
+
+  return result;
+}
+
+gboolean
+g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
+				       gpointer           oldval, 
+				       gpointer           newval)
+{
+  gboolean result;
+ 
+  atomic_spin_lock();
+  if (*atomic == oldval)
+    {
+      result = TRUE;
+      *atomic = newval;
+    }
+  else
+    result = FALSE;
+  atomic_spin_unlock();
+
+  return result;
+}
+#  else /* G_ATOMIC_ARM < 6 */
+gint
+g_atomic_int_exchange_and_add (volatile gint *atomic, 
+			       gint           val)
+{
+  unsigned long result;
+  int old, tmp;
+
+  do {
+    asm volatile (
+      "ldrex %0, [%3]\n"
+      "add %1, %0, %4\n"
+      "strex %2, %1, [%3]\n"
+      : "=&r" (old), "=&r" (tmp), "=&r" (result)
+      : "r" (atomic), "Ir" (val)
+      : "cc", "memory");
+  } while (result);
+  return old;
+}
+
+void
+g_atomic_int_add (volatile gint *atomic,
+		  gint           val)
+{
+  unsigned long result;
+  int tmp;
+
+  do {
+    asm volatile (
+      "ldrex %0, [%2]\n"
+      "add %0, %0, %3\n"
+      "strex %1, %0, [%2]\n"
+      : "=&r" (tmp), "=&r" (result)
+      : "r" (atomic), "Ir" (val)
+      : "cc", "memory");
+  } while (result);
+}
+
+gboolean
+g_atomic_int_compare_and_exchange (volatile gint *atomic, 
+				   gint           oldval, 
+				   gint           newval)
+{
+  unsigned long result;
+  int old;
+
+  asm volatile (
+    "ldrex %1, [%2]\n"
+    "mov %0, #0\n"
+    "teq %1, %3\n"
+    "strexeq %0, %4, [%2]\n"
+    : "=&r" (result), "=&r" (old)
+    : "r" (atomic), "Ir" (oldval), "r" (newval)
+    : "cc", "memory");
+  return (result) ? FALSE : TRUE;
+}
+
+gboolean
+g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
+				       gpointer           oldval, 
+				       gpointer           newval)
+{
+  unsigned long result;
+  void *old;
+
+  asm volatile (
+    "ldrex %1, [%2]\n"
+    "mov %0, #0\n"
+    "teq %1, %3\n"
+    "strexeq %0, %4, [%2]\n"
+    : "=&r" (result), "=&r" (old)
+    : "r" (atomic), "Ir" (oldval), "r" (newval)
+    : "cc", "memory");
+  return (result) ? FALSE : TRUE;
+}
+
+gint
+g_atomic_int_get (volatile gint *atomic)
+{
+  return *atomic;
+}
+
+void
+g_atomic_int_set (volatile gint *atomic,
+                  gint           newval)
+{
+  unsigned long result;
+
+  do {
+    asm volatile (
+      "ldrex %0, [%1]\n"
+      "strex %0, %2, [%1]\n"
+      : "=&r" (result)
+      : "r" (atomic), "r" (newval)
+      : "cc", "memory");
+  } while (result);
+}
+
+gpointer
+g_atomic_pointer_get (volatile gpointer *atomic)
+{
+  return *atomic;
+}   
+
+void
+g_atomic_pointer_set (volatile gpointer *atomic,
+                      gpointer           newval)
+{
+  unsigned long result;
+
+  do {
+    asm volatile (
+      "ldrex %0, [%1]\n"
+      "strex %0, %2, [%1]\n"
+      : "=&r" (result)
+      : "r" (atomic), "r" (newval)
+      : "cc", "memory");
+  } while (result);
+}
+#  endif /* G_ATOMIC_ARM < 6 */
+# elif defined(G_ATOMIC_ARM_LINUX)
+/* use special helper functions provided by the linux kernel */
+
+typedef void (_khelper_barrier_t)(void);
+#define _khelper_barrier (*(_khelper_barrier_t *)0xffff0fa0)
+/*#define G_ATOMIC_MEMORY_BARRIER _khelper_barrier()*/
+/* scratchbox/qemu explodes on barrier */
+#define G_ATOMIC_MEMORY_BARRIER while(0)
+typedef int (_khelper_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define _khelper_cmpxchg (*(_khelper_cmpxchg_t *)0xffff0fc0)
+
+gint
+g_atomic_int_exchange_and_add (volatile gint *atomic, 
+			       gint           val)
+{
+  int result;
+  int old, new;
+
+  do {
+    old = *atomic;
+    new = old + val;
+    result = _khelper_cmpxchg(old, new, atomic);
+  } while (result);
+  return old;
+}
+
+void
+g_atomic_int_add (volatile gint *atomic,
+		  gint           val)
+{
+  int result;
+  int old, new;
+
+  do {
+    old = *atomic;
+    new = old + val;
+    result = _khelper_cmpxchg(old, new, atomic);
+  } while (result);
+}
+
+gboolean
+g_atomic_int_compare_and_exchange (volatile gint *atomic, 
+				   gint           oldval, 
+				   gint           newval)
+{
+  int result;
+
+  result = _khelper_cmpxchg(oldval, newval, atomic);
+  return (result) ? FALSE : TRUE;
+}
+
+gboolean
+g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
+				       gpointer           oldval, 
+				       gpointer           newval)
+{
+  int result;
+
+  result = _khelper_cmpxchg(*((int *) &oldval),
+                            *((int *) &newval),
+                            (int *) atomic);
+  return (result) ? FALSE : TRUE;
+}
+# else /* !G_ATOMIC_ARM_LINUX */
 #  define DEFINE_WITH_MUTEXES
 # endif /* G_ATOMIC_IA64 */
 #else /* !__GNUC__ */
@@ -663,7 +943,7 @@
   g_mutex_unlock (g_atomic_mutex);
 }
 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */   
-#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
+#elif (defined(G_ATOMIC_OP_MEMORY_BARRIER_NEEDED) && !defined(G_ATOMIC_ARM))
 gint
 g_atomic_int_get (volatile gint *atomic)
 {

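Whichever path configure selects, a quick smoke test is to bump a counter from several threads and compare the result with the expected total. The program below is only a sketch against the GLib thread API of this generation (g_thread_init / g_thread_create); the worker name, thread count and iteration count are made up, and it is not part of the patch:

#include <glib.h>

#define N_THREADS     4
#define N_ITERATIONS  100000

static volatile gint counter = 0;

/* Each worker performs N_ITERATIONS atomic increments. */
static gpointer
worker (gpointer data)
{
  gint i;

  for (i = 0; i < N_ITERATIONS; i++)
    g_atomic_int_add (&counter, 1);

  return NULL;
}

int
main (void)
{
  GThread *threads[N_THREADS];
  gint i;

  g_thread_init (NULL);

  for (i = 0; i < N_THREADS; i++)
    threads[i] = g_thread_create (worker, NULL, TRUE, NULL);
  for (i = 0; i < N_THREADS; i++)
    g_thread_join (threads[i]);

  /* With working atomics this always prints N_THREADS * N_ITERATIONS. */
  g_print ("counter = %d (expected %d)\n",
           g_atomic_int_get (&counter), N_THREADS * N_ITERATIONS);
  return 0;
}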
