/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
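
/*
 * Worked example (illustrative, not part of the original header): the
 * low 32 bits of sem->count track active lockers, and each queued
 * waiter in the lib/rwsem.c slow path contributes one
 * RWSEM_WAITING_BIAS, so a negative count signals contention:
 *
 *	0x0000000000000000	unlocked
 *	0x0000000000000003	three active readers
 *	0xffffffff00000001	one active writer (WAITING_BIAS + ACTIVE_BIAS)
 *	0xfffffffe00000001	one active writer, one queued waiter
 *	0xffffffff00000000	no active lockers, one queued waiter
 */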

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}
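
/*
 * Worked fast-path arithmetic (illustrative): an uncontended
 * __down_read() takes the count from 0 to RWSEM_ACTIVE_READ_BIAS (1),
 * which is positive, so the slow path is skipped.  With a writer
 * holding the lock (count == 0xffffffff00000001), the increment yields
 * 0xffffffff00000002, negative as a long, and rwsem_down_read_failed()
 * is taken.
 */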

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}
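
/*
 * Illustrative trace: with two readers already in (count == 2), the
 * trylock reads tmp == 2 (>= 0) and cmpxchg() attempts 2 -> 3.  If
 * another CPU races in first the cmpxchg() fails, tmp is re-read and
 * the loop retries; once a writer or waiter turns the count negative,
 * the function returns 0 without spinning.
 */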

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}
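
/*
 * Worked fast-path arithmetic (illustrative): from an unlocked count
 * of 0, adding RWSEM_ACTIVE_WRITE_BIAS leaves exactly
 * RWSEM_ACTIVE_WRITE_BIAS, so the fast path wins.  Any pre-existing
 * reader, writer or waiter (e.g. one reader: 1 + WRITE_BIAS ==
 * 0xffffffff00000002) makes the sum differ and forces the slow path.
 */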

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}
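
/*
 * Illustrative: the single cmpxchg() succeeds only on the transition
 * from RWSEM_UNLOCKED_VALUE (0) to RWSEM_ACTIVE_WRITE_BIAS
 * (0xffffffff00000001); any other starting count means contention, so
 * the trylock fails immediately without looping.
 */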

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}
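
/*
 * Worked example (illustrative): the last reader leaving with one
 * waiter queued takes the count from 0xffffffff00000001 to
 * 0xffffffff00000000 (RWSEM_WAITING_BIAS): below -1 with a clear
 * active mask, so rwsem_wake() runs.  A reader leaving while others
 * remain (e.g. 2 -> 1) keeps the count positive and skips the wakeup.
 */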

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}
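
/*
 * Illustrative: an uncontended __up_write() takes the count from
 * 0xffffffff00000001 back to 0, which is not negative, so no wakeup.
 * With one waiter queued the count is 0xfffffffe00000001 beforehand
 * and 0xffffffff00000000 afterwards: negative, so rwsem_wake() runs.
 */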

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS,
				  (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
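
/*
 * Worked example (illustrative): an uncontended downgrade takes the
 * count from RWSEM_ACTIVE_WRITE_BIAS (0xffffffff00000001) to
 * RWSEM_ACTIVE_BIAS (1): the writer becomes a single active reader and
 * no wakeup is needed.  With one waiter queued the result is
 * 0xffffffff00000001, still negative, and rwsem_downgrade_wake() lets
 * queued readers in.
 */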

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */