@@ -24,18 +24,14 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
-/* This function gives an error if an invalid, non-NULL pointer type is passed
- * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
- * from the compiler, and give the errors already at link time.
+/*
+ * If unknown_lock_type() is referenced, it means we have tried to pass something
+ * not recognized as lockable to the macros below. Use QLNULL to intentionally pass
+ * a null lockable. Using NULL will cause (unused) references to unknown_lock_type()
+ * which may or may not be eliminated by optimization.
*/
-#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
+#define QLNULL ((QemuLockable *)NULL)
void unknown_lock_type(void *);
-#else
-static inline void unknown_lock_type(void *unused)
-{
- abort();
-}
-#endif
static inline __attribute__((__always_inline__)) QemuLockable *
qemu_make_lockable(void *x, QemuLockable *lockable)
@@ -46,7 +42,7 @@ qemu_make_lockable(void *x, QemuLockable *lockable)
return x ? lockable : NULL;
}
-/* Auxiliary macros to simplify QEMU_MAKE_LOCABLE. */
+/* Auxiliary macros to simplify QEMU_MAKE_LOCKABLE. */
#define QEMU_LOCK_FUNC(x) ((QemuLockUnlockFunc *) \
QEMU_GENERIC(x, \
(QemuMutex *, qemu_mutex_lock), \
@@ -79,7 +75,7 @@ qemu_make_lockable(void *x, QemuLockable *lockable)
*
* Returns a QemuLockable object that can be passed around
* to a function that can operate with locks of any kind, or
- * NULL if @x is %NULL.
+ * NULL if @x is %QLNULL.
*/
#define QEMU_MAKE_LOCKABLE(x) \
QEMU_GENERIC(x, \
@@ -1174,7 +1174,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
if (blk->quiesce_counter && !blk->disable_request_queuing) {
blk_dec_in_flight(blk);
- qemu_co_queue_wait(&blk->queued_requests, NULL);
+ qemu_co_queue_wait(&blk->queued_requests, QLNULL);
blk_inc_in_flight(blk);
}
}
@@ -2367,7 +2367,7 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
- while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+ while (qemu_co_enter_next(&blk->queued_requests, QLNULL)) {
/* Resume all queued requests */
}
}
@@ -120,7 +120,7 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
return false;
}
- qemu_co_queue_wait(&task->wait_queue, NULL);
+ qemu_co_queue_wait(&task->wait_queue, QLNULL);
return true;
}
@@ -157,7 +157,7 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
break;
}
}
@@ -297,7 +297,7 @@ mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
if (!op->is_pseudo_op && op->is_in_flight &&
op->is_active_write == active)
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
return;
}
}
@@ -22,13 +22,13 @@
static void fsdev_throttle_read_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[false], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[false], QLNULL);
}
static void fsdev_throttle_write_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[true], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[true], QLNULL);
}
int fsdev_throttle_parse_opts(QemuOpts *opts, FsThrottle *fst, Error **errp)
@@ -100,7 +100,7 @@ void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
if (throttle_enabled(&fst->cfg)) {
if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
- qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
+ qemu_co_queue_wait(&fst->throttled_reqs[is_write], QLNULL);
}
throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
@@ -2888,7 +2888,7 @@ static void coroutine_fn v9fs_flush(void *opaque)
/*
* Wait for pdu to complete.
*/
- qemu_co_queue_wait(&cancel_pdu->complete, NULL);
+ qemu_co_queue_wait(&cancel_pdu->complete, QLNULL);
if (!qemu_co_queue_next(&cancel_pdu->complete)) {
cancel_pdu->cancelled = 0;
pdu_free(cancel_pdu);
@@ -64,7 +64,7 @@ void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n)
{
assert(n <= s->total);
while (!co_try_get_from_shres(s, n)) {
- qemu_co_queue_wait(&s->queue, NULL);
+ qemu_co_queue_wait(&s->queue, QLNULL);
}
}
This allows us to build with -Og and other optimization levels that do not eliminate dead code. If we use QLNULL for null lockables, we can always rely on a reference to unknown_lock_type() as a link-time error indicator. Signed-off-by: Joe Slater <joe.slater@windriver.com> --- include/qemu/lockable.h | 20 ++++++++------------ block/block-backend.c | 4 ++-- block/block-copy.c | 2 +- block/mirror.c | 4 ++-- fsdev/qemu-fsdev-throttle.c | 6 +++--- hw/9pfs/9p.c | 2 +- util/qemu-co-shared-resource.c | 2 +- 7 files changed, 18 insertions(+), 22 deletions(-)