extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
+extern struct rw_semaphore *FASTCALL(rwsem_downgrade_write(struct rw_semaphore *sem));
/*
* the semaphore definition
: "memory", "cc", "edx");
}
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ __asm__ __volatile__(
+ "# beginning __downgrade_write\n\t"
+LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+ " js 2f\n\t" /* jump if the lock is being waited upon */
+ "1:\n\t"
+ LOCK_SECTION_START("")
+ "2:\n\t"
+ " pushl %%ecx\n\t"
+ " pushl %%edx\n\t"
+ " call rwsem_downgrade_wake\n\t"
+ " popl %%edx\n\t"
+ " popl %%ecx\n\t"
+ " jmp 1b\n"
+ LOCK_SECTION_END
+ "# ending __downgrade_write\n"
+ : "=m"(sem->count)
+ : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+ : "memory", "cc");
+}
+
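As a rough illustration of the count arithmetic that __downgrade_write relies on, here is a user-space sketch (not part of the patch) that models the same transition using the i386 bias constants; the constant names are copied from the kernel headers, everything else is illustrative only:

	#include <stdatomic.h>
	#include <stdio.h>

	#define RWSEM_ACTIVE_BIAS	0x00000001L
	#define RWSEM_WAITING_BIAS	(-0x00010000L)
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	int main(void)
	{
		/* count as held by a writer; add more RWSEM_WAITING_BIAS terms to model queued waiters */
		atomic_long count = RWSEM_ACTIVE_WRITE_BIAS;

		/* downgrade: cancel the waiting-bias half of the write bias, as the addl above does */
		long newcount = atomic_fetch_add(&count, -RWSEM_WAITING_BIAS) - RWSEM_WAITING_BIAS;

		if (newcount < 0)	/* the "js 2f" case: someone is queued */
			printf("waiters queued -> slow path (rwsem_downgrade_wake)\n");
		else			/* fast path: we are left holding one read lock */
			printf("no waiters -> count=%ld (one active reader)\n", newcount);
		return 0;
	}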
/*
* implement atomic add functionality
*/
* - there must be someone on the queue
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
*/
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
struct list_head *next;
rwsemtrace(sem,"Entering __rwsem_do_wake");
+ if (!wakewrite)
+ goto dont_wake_writers;
+
/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
try_again:
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
wake_up_process(waiter->task);
goto out;
+ /* don't want to wake any writers */
+ dont_wake_writers:
+ waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+ goto out;
+
/* grant an infinite number of read locks to the readers at the front of the queue
* - note we increment the 'active part' of the count by the number of readers (less one
* for the activity decrement we've already done) before waking any processes up
* - it might even be this process, since the waker takes a more active part
*/
if (!(count & RWSEM_ACTIVE_MASK))
- sem = __rwsem_do_wake(sem);
+ sem = __rwsem_do_wake(sem,1);
spin_unlock(&sem->wait_lock);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem);
+ sem = __rwsem_do_wake(sem,1);
spin_unlock(&sem->wait_lock);
return sem;
}
+/*
+ * downgrade a write lock into a read lock
+ * - caller incremented the waiting part of the count and found it still negative
+ * - just wake up any readers at the front of the queue
+ */
+struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+{
+ rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+
+ spin_lock(&sem->wait_lock);
+
+ /* do nothing if list empty */
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem,0);
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+ return sem;
+}
+
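The slow path above is meant to be reached through the generic downgrade_write() wrapper rather than called directly. Assuming that wrapper (added alongside this patch; only do_something_with() below is a made-up placeholder), the intended calling sequence is:

	static struct rw_semaphore my_sem;	/* assume init_rwsem(&my_sem) at setup */
	static int shared_state;

	void update_then_read(int new_value)
	{
		down_write(&my_sem);		/* exclusive access: modify the data */
		shared_state = new_value;

		downgrade_write(&my_sem);	/* keep the lock, but let readers back in */

		/* writers are still excluded; other readers may now proceed */
		do_something_with(shared_state);

		up_read(&my_sem);		/* drop the read lock we were left holding */
	}

This avoids the window in which another writer could slip in between an up_write() and a subsequent down_read().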
EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);