@@ -376,8 +376,11 @@ found:
void *mem_alloc(struct mem_region *region, size_t size, size_t align,
const char *location)
{
- void *r = __mem_alloc(region, size, align, location);
+ void *r;
+ assert(lock_held_by_me(&region->free_list_lock));
+
+ r = __mem_alloc(region, size, align, location);
if (r)
return r;
@@ -394,6 +397,8 @@ void mem_free(struct mem_region *region, void *mem, const char *location)
/* This should be a constant. */
assert(is_rodata(location));
+ assert(lock_held_by_me(&region->free_list_lock));
+
/* Freeing NULL is always a noop. */
if (!mem)
return;
@@ -426,6 +431,8 @@ bool mem_resize(struct mem_region *region, void *mem, size_t len,
/* This should be a constant. */
assert(is_rodata(location));
+ assert(lock_held_by_me(&region->free_list_lock));
+
/* Get header. */
hdr = mem - sizeof(*hdr);
if (hdr->free)
@@ -67,6 +67,11 @@ void unlock(struct lock *l)
l->lock_val = 0;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
#define TEST_HEAP_ORDER 27
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
@@ -69,6 +69,11 @@ void unlock(struct lock *l)
l->lock_val = 0;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
static bool heap_empty(void)
{
const struct alloc_hdr *h = region_start(&skiboot_heap);
@@ -72,6 +72,11 @@ void unlock(struct lock *l)
l->lock_val--;
}
+bool lock_held_by_me(struct lock *l __attribute__((unused)))
+{
+ return true;
+}
+
#define TEST_HEAP_ORDER 12
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
@@ -75,6 +75,11 @@ void unlock(struct lock *l)
l->lock_val = 0;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
/* We actually need a lot of room for the bitmaps! */
#define TEST_HEAP_ORDER 27
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
@@ -70,6 +70,11 @@ void unlock(struct lock *l)
l->lock_val--;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
#define TEST_HEAP_ORDER 12
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
@@ -130,7 +135,10 @@ int main(void)
assert(mem_check(other));
/* Allocate 1k from other region. */
+ lock(&other->free_list_lock);
mem_alloc(other, 1024, 1, "1k");
+ unlock(&other->free_list_lock);
+
mem_region_release_unused();
assert(mem_check(&skiboot_heap));
@@ -70,6 +70,11 @@ void unlock(struct lock *l)
l->lock_val--;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
#define TEST_HEAP_ORDER 12
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
This change adds asserts to the mem_region calls that should have the
per-region lock held. To keep the tests working, they need the
lock_held_by_me() function. The run-mem_region.c test has a bogus
implementation of this, as it doesn't do any locking at the moment.
This will be addressed in a later change.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
---
 core/mem_region.c                                 | 9 ++++++++-
 core/test/run-malloc-speed.c                      | 5 +++++
 core/test/run-malloc.c                            | 5 +++++
 core/test/run-mem_region.c                        | 5 +++++
 core/test/run-mem_region_init.c                   | 5 +++++
 core/test/run-mem_region_release_unused.c         | 8 ++++++++
 core/test/run-mem_region_release_unused_noalloc.c | 5 +++++
 7 files changed, 41 insertions(+), 1 deletion(-)