diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 2377af057d099ebd628bd9f7039cd4f54c9bf229..305a9663aee39aba26532082319433e0a4e9ae33 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,7 +1,41 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
+#include <asm/system.h>
 
+#ifdef __HAVE_ARCH_CMPXCHG
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->counter is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+       int counter;
+       int newcount;
+
+       for (;;) {
+               counter = atomic_read(atomic);
+               newcount = counter - 1;
+               if (!newcount)
+                       break;          /* do it the slow way */
+
+               newcount = cmpxchg(&atomic->counter, counter, newcount);
+               if (newcount == counter)
+                       return 0;
+       }
+
+       spin_lock(lock);
+       if (atomic_dec_and_test(atomic))
+               return 1;
+       spin_unlock(lock);
+       return 0;
+}
+#else
 /*
  * This is an architecture-neutral, but slow,
  * implementation of the notion of "decrement
@@ -33,5 +67,6 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
        spin_unlock(lock);
        return 0;
 }
+#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
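
Usage sketch (editorial addition, not part of the patch): _atomic_dec_and_lock() is normally reached through the atomic_dec_and_lock() wrapper. A caller drops a reference and, only when the count reaches zero, ends up holding the spinlock so it can unlink and free the object. The struct, field and variable names below (struct foo, refcount, foo_list, foo_lock, foo_put) are hypothetical; the interfaces used (atomic_dec_and_lock, list_del, spin_unlock, kfree) are the real kernel ones of this era.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct foo {
	atomic_t refcount;
	struct list_head node;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* protects foo_list */

static void foo_put(struct foo *f)
{
	/*
	 * On a __HAVE_ARCH_CMPXCHG architecture the common case (the
	 * count stays above zero) is handled by the lockless cmpxchg
	 * loop added in the patch above, so foo_lock is never touched.
	 * Only the final put returns 1, with foo_lock already held.
	 */
	if (atomic_dec_and_lock(&f->refcount, &foo_lock)) {
		list_del(&f->node);
		spin_unlock(&foo_lock);
		kfree(f);
	}
}

The point of the fast path is visible here: without it, every foo_put() would have to take foo_lock just to discover that the count is still non-zero.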