[PATCH] read-write semaphore downgrade and trylock
author		David Howells <dhowells@redhat.com>
		Thu, 25 Jul 2002 01:52:25 +0000 (18:52 -0700)
committer	Linus Torvalds <torvalds@home.transmeta.com>
		Thu, 25 Jul 2002 01:52:25 +0000 (18:52 -0700)
Here's a patch from Christoph Hellwig and myself to supply a write->read
semaphore downgrade operation, plus code from Brian Watson to supply trylock for rwsems.
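
For orientation before the hunks, here is a minimal usage sketch of the new API, assuming a hypothetical struct my_object with an embedded rw_semaphore; only downgrade_write() comes from this patch, and the trylock side mentioned above consists of down_read_trylock()/down_write_trylock(), which return 1 on success rather than sleeping:

	#include <linux/rwsem.h>

	/* struct my_object, its generation field and scan() are invented
	 * here purely to illustrate the new call */
	struct my_object {
		struct rw_semaphore	sem;
		unsigned long		generation;
	};

	static void scan(struct my_object *obj);	/* hypothetical read-side work */

	static void update_then_scan(struct my_object *obj)
	{
		down_write(&obj->sem);		/* exclusive access */
		obj->generation++;		/* writer-side update */
		downgrade_write(&obj->sem);	/* atomically become a reader */
		scan(obj);			/* may run concurrently with other
						 * readers, but not with writers */
		up_read(&obj->sem);
	}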

include/asm-i386/rwsem.h
include/linux/rwsem.h
lib/rwsem.c

diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 5d994a46f9d7c2c2e8023e7350240fc0c02a71e2..72f2ae078a3616d5206f65b040d3a63d2c9b413f 100644
@@ -46,6 +46,7 @@ struct rwsem_waiter;
 extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
 extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
 extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
+extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem));
 
 /*
  * the semaphore definition
@@ -195,6 +196,31 @@ LOCK_PREFIX        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
                : "memory", "cc", "edx");
 }
 
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       __asm__ __volatile__(
+               "# beginning __downgrade_write\n\t"
+LOCK_PREFIX    "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+               "  js        2f\n\t" /* jump if the lock is being waited upon */
+               "1:\n\t"
+               LOCK_SECTION_START("")
+               "2:\n\t"
+               "  pushl     %%ecx\n\t"
+               "  pushl     %%edx\n\t"
+               "  call      rwsem_downgrade_wake\n\t"
+               "  popl      %%edx\n\t"
+               "  popl      %%ecx\n\t"
+               "  jmp       1b\n"
+               LOCK_SECTION_END
+               "# ending __downgrade_write\n"
+               : "=m"(sem->count)
+               : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+               : "memory", "cc");
+}
+
 /*
  * implement atomic add functionality
  */
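
To make the fast path above concrete: on i386 the count word packs the active count into the low 16 bits and one negative RWSEM_WAITING_BIAS per sleeper into the high 16 bits, so a bare write lock is 0xffff0001. The addl adds the immediate -RWSEM_WAITING_BIAS (+0x00010000), turning the write lock into a single read lock in one atomic step; the sign flag is only set (the js case) if somebody is still queued. A userspace sketch of that arithmetic, with the bias values copied from asm-i386/rwsem.h (the demo itself is not kernel code):

	#include <stdio.h>

	#define RWSEM_ACTIVE_BIAS	0x00000001
	#define RWSEM_WAITING_BIAS	(-0x00010000)
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		int count;

		/* write-locked, nobody waiting: 0xffff0001 */
		count = RWSEM_ACTIVE_WRITE_BIAS;
		count -= RWSEM_WAITING_BIAS;	/* what the addl does */
		printf("no waiters: %08x (positive, fast path)\n", (unsigned) count);

		/* write-locked with one sleeper: 0xfffe0001 */
		count = RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
		count -= RWSEM_WAITING_BIAS;
		printf("one waiter: %08x (negative, rwsem_downgrade_wake)\n",
		       (unsigned) count);
		return 0;
	}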
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index c74cc975cab6272f9405c2eee382082a86a93f99..320138d6643d9823d1fb16304e49fdeaa803ffe9 100644
@@ -75,6 +75,16 @@ static inline void up_write(struct rw_semaphore *sem)
        rwsemtrace(sem,"Leaving up_write");
 }
 
+/*
+ * downgrade write lock to read lock
+ */
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+       rwsemtrace(sem,"Entering downgrade_write");
+       __downgrade_write(sem);
+       rwsemtrace(sem,"Leaving downgrade_write");
+}
+
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
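
The wrapper adds only tracing around __downgrade_write(), but it is worth spelling out why an atomic downgrade exists at all: dropping the write lock and then taking a read lock would let another writer slip into the gap. A sketch of the two orderings, with sem standing for any rw_semaphore:

	down_write(&sem);
	/* ... modify ... */
	up_write(&sem);			/* BAD: another writer can get in */
	down_read(&sem);		/* here and change the data under us */

	down_write(&sem);
	/* ... modify ... */
	downgrade_write(&sem);		/* no window: writer becomes reader
					 * without ever releasing the lock */
	/* ... read ... */
	up_read(&sem);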
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 1acf30ae566bd681dd32ad15cb2dffe7a7de2804..d0d93847c7854916c3720c77b472474186f62d11 100644
@@ -34,8 +34,9 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
  *   - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
        struct rwsem_waiter *waiter;
        struct list_head *next;
@@ -44,6 +45,9 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
 
        rwsemtrace(sem,"Entering __rwsem_do_wake");
 
+       if (!wakewrite)
+               goto dont_wake_writers;
+
        /* only wake someone up if we can transition the active part of the count from 0 -> 1 */
  try_again:
        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
@@ -64,6 +68,12 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
        wake_up_process(waiter->task);
        goto out;
 
+       /* don't want to wake any writers */
+ dont_wake_writers:
+       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+               goto out;
+
        /* grant an infinite number of read locks to the readers at the front of the queue
         * - note we increment the 'active part' of the count by the number of readers (less one
         *   for the activity decrement we've already done) before waking any processes up
@@ -132,7 +142,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
         * - it might even be this process, since the waker takes a more active part
         */
        if (!(count & RWSEM_ACTIVE_MASK))
-               sem = __rwsem_do_wake(sem);
+               sem = __rwsem_do_wake(sem,1);
 
        spin_unlock(&sem->wait_lock);
 
@@ -193,7 +203,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem);
+               sem = __rwsem_do_wake(sem,1);
 
        spin_unlock(&sem->wait_lock);
 
@@ -202,6 +212,27 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
        return sem;
 }
 
+/*
+ * downgrade a write lock into a read lock
+ * - caller incremented waiting part of count, and discovered it to be still negative
+ * - just wake up any readers at the front of the queue
+ */
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+{
+       rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+
+       spin_lock(&sem->wait_lock);
+
+       /* do nothing if list empty */
+       if (!list_empty(&sem->wait_list))
+               sem = __rwsem_do_wake(sem,0);
+
+       spin_unlock(&sem->wait_lock);
+
+       rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+       return sem;
+}
+
 EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
 EXPORT_SYMBOL_NOVERS(rwsem_wake);
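
Stepping back from the diff: with wakewrite zero (the downgrade path), __rwsem_do_wake() leaves a writer at the head of the queue asleep (the downgrading task still holds a read lock, so a woken writer could not run anyway), while a run of readers at the front are all granted the lock at once. A simplified userspace model of that policy; the flag values match lib/rwsem.c of this era, but the plain next-pointer queue is illustrative rather than the kernel's list_head machinery:

	#define RWSEM_WAITING_FOR_READ	0x00000001
	#define RWSEM_WAITING_FOR_WRITE	0x00000002

	struct waiter {
		struct waiter	*next;	/* stand-in for the list_head queue */
		int		flags;
	};

	/* how many front-of-queue waiters would be granted the lock */
	static int count_woken(struct waiter *head, int wakewrite)
	{
		int woken = 0;

		/* a writer at the head is woken alone, and never from a
		 * downgrade, since the downgrader still holds a read lock */
		if (head && (head->flags & RWSEM_WAITING_FOR_WRITE))
			return wakewrite ? 1 : 0;

		/* every contiguous reader at the front gets the lock in
		 * one batch */
		while (head && (head->flags & RWSEM_WAITING_FOR_READ)) {
			woken++;
			head = head->next;
		}
		return woken;
	}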