@@ -67,4 +67,39 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu);
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
+struct lgrwlock { /* read-preference local-global rwlock */
+ unsigned long __percpu *reader_refcnt; /* per-CPU read-nesting count; FALLBACK_BASE bit set while fallback_rwlock is read-held */
+ struct lglock lglock; /* per-CPU locks: reader fast path, swept by writers */
+ rwlock_t fallback_rwlock; /* taken by readers that lose the per-CPU trylock race */
+};
+
+#define __DEFINE_LGRWLOCK_PERCPU_DATA(name) /* per-CPU backing storage for one lgrwlock */ \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static DEFINE_PER_CPU(unsigned long, name ## _refcnt); /* read-nesting counters */
+
+#define __LGRWLOCK_INIT(name) /* initializer; requires __DEFINE_LGRWLOCK_PERCPU_DATA(name) */ \
+ { \
+ .reader_refcnt = &name ## _refcnt, \
+ .lglock = { .lock = &name ## _lock }, \
+ .fallback_rwlock = __RW_LOCK_UNLOCKED(name.fallback_rwlock)\
+ } /* fallback_rwlock needs no runtime init */
+
+#define DEFINE_LGRWLOCK(name) /* global-scope lgrwlock plus its per-CPU data */ \
+ __DEFINE_LGRWLOCK_PERCPU_DATA(name) \
+ struct lgrwlock name = __LGRWLOCK_INIT(name)
+
+#define DEFINE_STATIC_LGRWLOCK(name) /* file-scope variant of DEFINE_LGRWLOCK */ \
+ __DEFINE_LGRWLOCK_PERCPU_DATA(name) \
+ static struct lgrwlock name = __LGRWLOCK_INIT(name)
+
+static inline void lg_rwlock_init(struct lgrwlock *lgrw, char *name) /* runtime init (lockdep key) */
+{
+ lg_lock_init(&lgrw->lglock, name); /* fallback_rwlock is initialized statically by __LGRWLOCK_INIT */
+}
+
+void lg_rwlock_local_read_lock(struct lgrwlock *lgrw); /* nestable, CPU-local read lock */
+void lg_rwlock_local_read_unlock(struct lgrwlock *lgrw); /* pairs with read_lock above */
+void lg_rwlock_global_write_lock(struct lgrwlock *lgrw); /* excludes readers on all CPUs */
+void lg_rwlock_global_write_unlock(struct lgrwlock *lgrw); /* pairs with write_lock above */
#endif
@@ -87,3 +87,55 @@ void lg_global_unlock(struct lglock *lg)
preempt_enable();
}
EXPORT_SYMBOL(lg_global_unlock);
+
+#define FALLBACK_BASE (1UL << 30) /* ORed into reader_refcnt while fallback_rwlock is read-held */
+
+void lg_rwlock_local_read_lock(struct lgrwlock *lgrw)
+{
+ struct lglock *lg = &lgrw->lglock;
+ /* nestable read lock; lockdep read acquisition recorded up front */
+ preempt_disable();
+ rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ if (likely(!__this_cpu_read(*lgrw->reader_refcnt))) { /* outermost read on this CPU? */
+ if (!arch_spin_trylock(this_cpu_ptr(lg->lock))) { /* lost to a lg_global_lock holder */
+ read_lock(&lgrw->fallback_rwlock); /* wait for the writer on the shared lock */
+ __this_cpu_add(*lgrw->reader_refcnt, FALLBACK_BASE); /* remember which path to undo */
+ }
+ }
+
+ __this_cpu_inc(*lgrw->reader_refcnt); /* count this nesting level */
+}
+EXPORT_SYMBOL(lg_rwlock_local_read_lock);
+
+void lg_rwlock_local_read_unlock(struct lgrwlock *lgrw)
+{
+ switch (__this_cpu_dec_return(*lgrw->reader_refcnt)) { /* drop one nesting level */
+ case 0: /* outermost unlock, fast path: per-CPU lock held */
+ lg_local_unlock(&lgrw->lglock); /* does the release + preempt_enable, hence early return */
+ return;
+ case FALLBACK_BASE: /* outermost unlock, fallback path */
+ __this_cpu_sub(*lgrw->reader_refcnt, FALLBACK_BASE); /* clear the marker -> refcnt is 0 */
+ read_unlock(&lgrw->fallback_rwlock);
+ break;
+ default: /* still nested: only the shared epilogue below */
+ break;
+ }
+
+ /* fix: 'lg' was never declared in this function; name the lglock explicitly */
+ rwlock_release(&lgrw->lglock.lock_dep_map, 1, _RET_IP_);
+ preempt_enable();
+}
+EXPORT_SYMBOL(lg_rwlock_local_read_unlock);
+
+void lg_rwlock_global_write_lock(struct lgrwlock *lgrw)
+{
+ lg_global_lock(&lgrw->lglock); /* take every per-CPU lock: stops new fast-path readers */
+ write_lock(&lgrw->fallback_rwlock); /* then wait out readers on the fallback path */
+}
+EXPORT_SYMBOL(lg_rwlock_global_write_lock);
+
+void lg_rwlock_global_write_unlock(struct lgrwlock *lgrw)
+{
+ write_unlock(&lgrw->fallback_rwlock); /* release in reverse order of write_lock */
+ lg_global_unlock(&lgrw->lglock);
+}
+EXPORT_SYMBOL(lg_rwlock_global_write_unlock);