diff mbox

[v2,1/2] refcnt: introduce Qref to abstract the refcnt interface

Message ID 1373949390-3642-1-git-send-email-pingfank@linux.vnet.ibm.com
State New
Headers show

Commit Message

pingfan liu July 16, 2013, 4:36 a.m. UTC
Qref is similar to kref. It hides the refcount details behind a
common interface. The implementation follows the usual refcounting
idiom and applies memory-model optimizations where the compiler
supports them, falling back on the gcc atomic built-ins otherwise.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
v2:
    put Qref to a dedicated file.
    rename qref_get/_put as qref_inc/_dec
---
 include/qemu/refcnt.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 include/qemu/refcnt.h
diff mbox

Patch

diff --git a/include/qemu/refcnt.h b/include/qemu/refcnt.h
new file mode 100644
index 0000000..815534a
--- /dev/null
+++ b/include/qemu/refcnt.h
@@ -0,0 +1,57 @@ 
+/*
+ * refcount management routine
+ *
+ * Copyright IBM, Corp. 2013
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_REFCNT_H
+#define QEMU_REFCNT_H
+
+#include "qemu/osdep.h"
+
/*
 * Qref: a thread-safe reference counter meant to be embedded in a
 * refcounted object.  The atomic operations rely on the gcc built-ins;
 * when the compiler offers the C11-style __atomic interface the
 * implementation relaxes the memory ordering where safe, otherwise it
 * falls back on the older full-barrier __sync built-ins.
 */
typedef struct Qref {
    int32_t count;
} Qref;
+
+static inline void qref_inc(Qref *qref)
+{
+#ifdef __ATOMIC_RELAXED
+    __atomic_fetch_add(&qref->count, 1, __ATOMIC_RELAXED);
+#else
+    __sync_fetch_and_add(&qref->count, 1);
+#endif
+}
+
+typedef void (*ReleaseFn)(Qref *);
+
+/* When refcnt comes to zero, @release will be called */
+static inline void qref_dec(Qref *qref, ReleaseFn release)
+{
+    int32_t i;
+
+#ifdef __ATOMIC_RELAXED
+    i = __atomic_fetch_add(&qref->count, -1, __ATOMIC_RELAXED);
+    g_assert(i > 0);
+    if (unlikely(i == 1)) {
+        __atomic_thread_fence(__ATOMIC_SEQ_CST);
+        release(qref);
+    }
+#else
+    i = __sync_fetch_and_add(&qref->count, -1);
+    g_assert(i > 0);
+    if (unlikely(i == 1)) {
+        release(qref);
+    }
+#endif
+}
+#endif