From patchwork Thu Sep 18 03:08:19 2008
X-Patchwork-Submitter: Yuji Mano
X-Patchwork-Id: 485
X-Patchwork-Delegate: yuji.mano@am.sony.com
Message-ID: <48D1C623.5000807@am.sony.com>
Date: Wed, 17 Sep 2008 20:08:19 -0700
From: Yuji Mano
To: CBE Development
In-Reply-To: <48C972C3.2060906@am.sony.com>
Subject: [Cbe-oss-dev] [PATCH 02/11 v3] MARS: Workload queue block replace bit fields

This replaces the use of bit fields for the workload queue block bits with
explicit bitwise shift/mask operations for better portability.
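Background note (not part of the patch): the C standard leaves bit-field layout
(allocation order, padding, whether a field may straddle a storage unit)
implementation-defined, so host and MPU code built with different compilers may
not agree on the in-memory layout of the shared queue block. The explicit
shift/mask macros pin that layout down. Below is a minimal standalone sketch of
how the new MARS_BITS_GET/MARS_BITS_SET macros pack and unpack a field; it
reuses the shifts, masks, and macro bodies the patch defines, while the main()
driver and the example field values are purely illustrative.

  #include <stdint.h>
  #include <stdio.h>

  /* field layout from the patch: STATE occupies bits [59:56],
   * COUNTER occupies bits [15:0] of the 64-bit block entry */
  #define MARS_BITS_SHIFT_STATE   56
  #define MARS_BITS_SHIFT_COUNTER 0
  #define MARS_BITS_MASK_STATE    0x0f00000000000000ULL
  #define MARS_BITS_MASK_COUNTER  0x000000000000ffffULL

  #define MARS_BITS_GET(bits, name) \
          ((*(bits) & MARS_BITS_MASK_##name) >> MARS_BITS_SHIFT_##name)

  #define MARS_BITS_SET(bits, name, val) \
          (*bits) = ((*(bits) & ~MARS_BITS_MASK_##name) | \
          ((uint64_t)(val) << MARS_BITS_SHIFT_##name))

  int main(void)
  {
          uint64_t bits = 0;   /* one workload queue block entry */

          /* illustrative values only; the real MARS_WORKLOAD_* constants
           * are defined elsewhere in the MARS headers */
          MARS_BITS_SET(&bits, STATE, 0x3);
          MARS_BITS_SET(&bits, COUNTER, 42);

          printf("state=%llu counter=%llu\n",
                 (unsigned long long)MARS_BITS_GET(&bits, STATE),
                 (unsigned long long)MARS_BITS_GET(&bits, COUNTER));
          return 0;
  }

Note that MARS_BITS_SET, as defined in the patch, does not mask the value after
shifting, so callers are expected to pass values that fit within the named field.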
Signed-off-by: Yuji Mano
Acked-by: Kazunori Asayama

--- a/include/common/mars/mars_workload_types.h
+++ b/include/common/mars/mars_workload_types.h
@@ -76,7 +76,39 @@ extern "C" {
 #define MARS_WORKLOAD_QUEUE_ALIGN		128	/* align to 128 bytes */
 #define MARS_WORKLOAD_QUEUE_HEADER_ALIGN	128	/* align to 128 bytes */
 #define MARS_WORKLOAD_QUEUE_BLOCK_ALIGN		128	/* align to 128 bytes */
-#define MARS_WORKLOAD_QUEUE_BLOCK_BITS_ALIGN	8	/* align to 8 bytes */
+
+/*
+ * MARS workload queue block bits
+ * ----------------------------------------------------------------------------
+ * |[63...60]|[59...56]|[55....48]|[ 47 ]|[46....32]|[31.....16]|[15......0]|
+ * ----------------------------------------------------------------------------
+ * |  4-bits |  4-bits |  8-bits  | 1-bit| 15-bits  |  16-bits  |  16-bits  |
+ * ----------------------------------------------------------------------------
+ * |   TYPE  |  STATE  | PRIORITY |SIGNAL| RESERVED |  WAIT_ID  |  COUNTER  |
+ * ----------------------------------------------------------------------------
+ */
+#define MARS_BITS_SIZE			64
+
+#define MARS_BITS_SHIFT_TYPE		60
+#define MARS_BITS_SHIFT_STATE		56
+#define MARS_BITS_SHIFT_PRIORITY	48
+#define MARS_BITS_SHIFT_SIGNAL		47
+#define MARS_BITS_SHIFT_WAIT_ID		16
+#define MARS_BITS_SHIFT_COUNTER		0
+
+#define MARS_BITS_MASK_TYPE		0xf000000000000000ULL
+#define MARS_BITS_MASK_STATE		0x0f00000000000000ULL
+#define MARS_BITS_MASK_PRIORITY		0x00ff000000000000ULL
+#define MARS_BITS_MASK_SIGNAL		0x0000800000000000ULL
+#define MARS_BITS_MASK_WAIT_ID		0x00000000ffff0000ULL
+#define MARS_BITS_MASK_COUNTER		0x000000000000ffffULL
+
+#define MARS_BITS_GET(bits, name) \
+	((*(bits) & MARS_BITS_MASK_##name) >> MARS_BITS_SHIFT_##name)
+
+#define MARS_BITS_SET(bits, name, val) \
+	(*bits) = ((*(bits) & ~MARS_BITS_MASK_##name) | \
+	((uint64_t)(val) << MARS_BITS_SHIFT_##name))
 
 /* mars workload context */
 struct mars_workload_context {
@@ -90,21 +122,11 @@ struct mars_workload_queue_header {
 	uint8_t flag;
 } __attribute__((aligned(MARS_WORKLOAD_QUEUE_HEADER_ALIGN)));
 
-/* 8 byte workload queue block bits structure */
-struct mars_workload_queue_block_bits {
-	uint64_t type:4;
-	uint64_t state:4;
-	uint64_t priority:8;
-	uint64_t signal:1;
-	uint64_t wait:16;
-	uint64_t counter:16;
-} __attribute__((aligned(MARS_WORKLOAD_QUEUE_BLOCK_BITS_ALIGN)));
-
 /* 128 byte workload queue block structure */
 struct mars_workload_queue_block {
 	uint32_t lock;
 	uint32_t pad;
-	struct mars_workload_queue_block_bits bits[MARS_WORKLOAD_PER_BLOCK];
+	uint64_t bits[MARS_WORKLOAD_PER_BLOCK];
 } __attribute__((aligned(MARS_WORKLOAD_QUEUE_BLOCK_ALIGN)));
 
 /* mars workload queue structure */
--- a/src/host/lib/mars_workload_queue.c
+++ b/src/host/lib/mars_workload_queue.c
@@ -63,12 +63,20 @@ int workload_queue_initialize(struct mar
 		mars_mutex_lock((struct mars_mutex *)p);
 
 		for (index = 0; index < MARS_WORKLOAD_PER_BLOCK; index++) {
-			p->bits[index].type = MARS_WORKLOAD_TYPE_NONE;
-			p->bits[index].state = MARS_WORKLOAD_STATE_NONE;
-			p->bits[index].priority = MARS_WORKLOAD_PRIORITY_MIN;
-			p->bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
-			p->bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
-			p->bits[index].wait = MARS_WORKLOAD_ID_NONE;
+			uint64_t *bits = &p->bits[index];
+
+			MARS_BITS_SET(bits, TYPE,
+				MARS_WORKLOAD_TYPE_NONE);
+			MARS_BITS_SET(bits, STATE,
+				MARS_WORKLOAD_STATE_NONE);
+			MARS_BITS_SET(bits, PRIORITY,
+				MARS_WORKLOAD_PRIORITY_MIN);
+			MARS_BITS_SET(bits, COUNTER,
+				MARS_WORKLOAD_COUNTER_MIN);
+			MARS_BITS_SET(bits, SIGNAL,
+				MARS_WORKLOAD_SIGNAL_OFF);
+			MARS_BITS_SET(bits, WAIT_ID,
+				MARS_WORKLOAD_ID_NONE);
 		}
 
 		mars_mutex_unlock((struct mars_mutex *)p);
@@ -91,7 +99,7 @@ int workload_queue_finalize(struct mars_
 		block = id / MARS_WORKLOAD_PER_BLOCK;
 		index = id % MARS_WORKLOAD_PER_BLOCK;
 
-		if (queue->block[block].bits[index].state !=
+		if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
 			MARS_WORKLOAD_STATE_NONE)
 			break;
 
@@ -117,12 +125,13 @@ int workload_queue_add_begin(struct mars
 	int block = 0;
 	int index = 0;
 
+	uint64_t *bits = &queue->block[block].bits[index];
+
 	*id = 0;
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	while (queue->block[block].bits[index].state !=
-		MARS_WORKLOAD_STATE_NONE) {
+	while (MARS_BITS_GET(bits, STATE) != MARS_WORKLOAD_STATE_NONE) {
 		(*id)++;
 		index++;
 		if (index == MARS_WORKLOAD_PER_BLOCK) {
@@ -137,16 +146,17 @@ int workload_queue_add_begin(struct mars
 			mars_mutex_lock(
 				(struct mars_mutex *)&queue->block[block]);
 		}
+		bits = &queue->block[block].bits[index];
 	}
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_NONE,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(bits, STATE) == MARS_WORKLOAD_STATE_NONE,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set type and set state to adding */
-	queue->block[block].bits[index].type = type;
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_ADDING;
+	MARS_BITS_SET(bits, TYPE, type);
+	MARS_BITS_SET(bits, STATE, MARS_WORKLOAD_STATE_ADDING);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -165,15 +175,17 @@ int workload_queue_add_end(struct mars_w
 	int block = id / MARS_WORKLOAD_PER_BLOCK;
 	int index = id % MARS_WORKLOAD_PER_BLOCK;
 
+	uint64_t *bits = &queue->block[block].bits[index];
+
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_ADDING,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(bits, STATE) == MARS_WORKLOAD_STATE_ADDING,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* reset workload queue bits and set state to finished state */
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
+	MARS_BITS_SET(bits, STATE, MARS_WORKLOAD_STATE_FINISHED);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -190,13 +202,15 @@ int workload_queue_add_cancel(struct mar
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_ADDING,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_ADDING,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set state back to none state */
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_NONE;
+	MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+		MARS_WORKLOAD_STATE_NONE);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -214,13 +228,15 @@ int workload_queue_remove_begin(struct m
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_FINISHED,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_FINISHED,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set state to removing */
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_REMOVING;
+	MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+		MARS_WORKLOAD_STATE_REMOVING);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -241,13 +257,15 @@ int workload_queue_remove_end(struct mar
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_REMOVING,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_REMOVING,
		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set state to none */
-	queue->block[block].bits[index].type = MARS_WORKLOAD_TYPE_NONE;
+	MARS_BITS_SET(&queue->block[block].bits[index], TYPE,
+		MARS_WORKLOAD_TYPE_NONE);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -264,13 +282,15 @@ int workload_queue_remove_cancel(struct
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_REMOVING,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_REMOVING,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set state back to finished */
-	queue->block[block].bits[index].type = MARS_WORKLOAD_STATE_FINISHED;
+	MARS_BITS_SET(&queue->block[block].bits[index], TYPE,
+		MARS_WORKLOAD_STATE_FINISHED);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -289,17 +309,23 @@ int workload_queue_schedule_begin(struct
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_FINISHED,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_FINISHED,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* reset workload queue bits and set state to scheduling */
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_SCHEDULING;
-	queue->block[block].bits[index].priority = priority;
-	queue->block[block].bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
-	queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
-	queue->block[block].bits[index].wait = MARS_WORKLOAD_ID_NONE;
+	MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+		MARS_WORKLOAD_STATE_SCHEDULING);
+	MARS_BITS_SET(&queue->block[block].bits[index], PRIORITY,
+		priority);
+	MARS_BITS_SET(&queue->block[block].bits[index], COUNTER,
+		MARS_WORKLOAD_COUNTER_MIN);
+	MARS_BITS_SET(&queue->block[block].bits[index], SIGNAL,
+		MARS_WORKLOAD_SIGNAL_OFF);
+	MARS_BITS_SET(&queue->block[block].bits[index], WAIT_ID,
+		MARS_WORKLOAD_ID_NONE);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -320,13 +346,15 @@ int workload_queue_schedule_end(struct m
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_SCHEDULING,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_SCHEDULING,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set state to ready */
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_READY;
+	MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+		MARS_WORKLOAD_STATE_READY);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -344,13 +372,15 @@ int workload_queue_schedule_cancel(struc
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state ==
-		MARS_WORKLOAD_STATE_SCHEDULING,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_SCHEDULING,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
 	/* set state back to finished */
-	queue->block[block].bits[index].state = MARS_WORKLOAD_STATE_FINISHED;
+	MARS_BITS_SET(&queue->block[block].bits[index], STATE,
+		MARS_WORKLOAD_STATE_FINISHED);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
@@ -366,10 +396,11 @@ int workload_queue_wait(struct mars_work
 	int block = id / MARS_WORKLOAD_PER_BLOCK;
 	int index = id % MARS_WORKLOAD_PER_BLOCK;
 
-	while (queue->block[block].bits[index].state !=
+	while (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
 		MARS_WORKLOAD_STATE_FINISHED) {
-		MARS_CHECK_RET(queue->block[block].bits[index].state !=
-			MARS_WORKLOAD_STATE_NONE,
+		MARS_CHECK_RET(
+			MARS_BITS_GET(&queue->block[block].bits[index], STATE)
+			!= MARS_WORKLOAD_STATE_NONE,
 			MARS_ERROR_STATE);
 		sched_yield();
 	}
@@ -390,11 +421,12 @@ int workload_queue_try_wait(struct mars_
 	int block = id / MARS_WORKLOAD_PER_BLOCK;
 	int index = id % MARS_WORKLOAD_PER_BLOCK;
 
-	MARS_CHECK_RET(queue->block[block].bits[index].state !=
-		MARS_WORKLOAD_STATE_NONE,
+	MARS_CHECK_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
+		MARS_WORKLOAD_STATE_NONE,
 		MARS_ERROR_STATE);
 
-	if (queue->block[block].bits[index].state !=
+	if (MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
 		MARS_WORKLOAD_STATE_FINISHED)
 		return MARS_ERROR_BUSY;
 
@@ -415,12 +447,14 @@ int workload_queue_signal_send(struct ma
 
 	mars_mutex_lock((struct mars_mutex *)&queue->block[block]);
 
-	MARS_CHECK_CLEANUP_RET(queue->block[block].bits[index].state !=
-		MARS_WORKLOAD_STATE_NONE,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue->block[block].bits[index], STATE) !=
+		MARS_WORKLOAD_STATE_NONE,
 		mars_mutex_unlock((struct mars_mutex *)&queue->block[block]),
 		MARS_ERROR_STATE);
 
-	queue->block[block].bits[index].signal = MARS_WORKLOAD_SIGNAL_ON;
+	MARS_BITS_SET(&queue->block[block].bits[index], SIGNAL,
+		MARS_WORKLOAD_SIGNAL_ON);
 
 	mars_mutex_unlock((struct mars_mutex *)&queue->block[block]);
 
--- a/src/mpu/kernel/mars_kernel_scheduler.c
+++ b/src/mpu/kernel/mars_kernel_scheduler.c
@@ -65,42 +65,42 @@ static int search_block(int block)
 {
 	int i;
 	int index = -1;
-	int max_count = -1;
-	int max_priority = -1;
+	uint16_t max_counter = 0;
+	uint16_t max_priority = 0;
 
 	/* search through workload queue for next workload to run
 	 * while incrementing wait counter for all waiting workloads
 	 * and pick the workload that has been waiting the longest */
 	for (i = 0; i < MARS_WORKLOAD_PER_BLOCK; i++) {
-		struct mars_workload_queue_block_bits *bits
-			= &queue_block.bits[i];
+		uint64_t *bits = &queue_block.bits[i];
+		uint8_t signal = MARS_BITS_GET(bits, SIGNAL);
+		uint8_t priority = MARS_BITS_GET(bits, PRIORITY);
+		uint16_t wait_id = MARS_BITS_GET(bits, WAIT_ID);
+		uint16_t counter = MARS_BITS_GET(bits, COUNTER);
 
-		switch (bits->state) {
+		switch (MARS_BITS_GET(bits, STATE)) {
 		case MARS_WORKLOAD_STATE_READY:
-			/* priority greater than max priority so select */
-			if ((int)bits->priority > max_priority) {
+			/* compare priority and counter with previous ones */
+			if (index < 0 || priority > max_priority ||
+				(priority == max_priority &&
+				counter > max_counter)) {
 				index = i;
-				max_count = bits->counter;
-				max_priority = bits->priority;
-			/* priority equal and wait counter greater so select */
-			} else if ((int)bits->priority == max_priority &&
-				(int)bits->counter > max_count) {
-				index = i;
-				max_count = bits->counter;
+				max_counter = counter;
+				max_priority = priority;
 			}
 
 			/* increment wait counter without overflowing */
-			if (bits->counter < MARS_WORKLOAD_COUNTER_MAX)
-				bits->counter++;
+			if (counter < MARS_WORKLOAD_COUNTER_MAX)
+				MARS_BITS_SET(bits, COUNTER, counter + 1);
 			break;
 		case MARS_WORKLOAD_STATE_WAITING:
 			/* waiting for workload to finish so check status */
-			if (bits->wait != MARS_WORKLOAD_ID_NONE) {
+			if (wait_id != MARS_WORKLOAD_ID_NONE) {
 				struct mars_workload_queue_block wait_block;
 				struct mars_workload_queue_block *p_wait_block;
+				uint8_t wait_state;
 
-				int bl = bits->wait / MARS_WORKLOAD_PER_BLOCK;
-				int id = bits->wait % MARS_WORKLOAD_PER_BLOCK;
+				int bl = wait_id / MARS_WORKLOAD_PER_BLOCK;
+				int id = wait_id % MARS_WORKLOAD_PER_BLOCK;
 
 				/* check if workload id is in the same block */
 				if (block != bl) {
@@ -113,16 +113,24 @@ static int search_block(int block)
 					p_wait_block = &queue_block;
 				}
 
+				wait_state =
+					MARS_BITS_GET(&p_wait_block->bits[id],
+						STATE);
+
 				/* check if workload is finished and reset */
-				if (p_wait_block->bits[id].state ==
+				if (wait_state ==
 					MARS_WORKLOAD_STATE_FINISHED) {
-					bits->wait = MARS_WORKLOAD_ID_NONE;
-					bits->state = MARS_WORKLOAD_STATE_READY;
+					MARS_BITS_SET(bits, WAIT_ID,
+						MARS_WORKLOAD_ID_NONE);
+					MARS_BITS_SET(bits, STATE,
+						MARS_WORKLOAD_STATE_READY);
 				}
 			/* waiting for signal so check signal bit and reset */
-			} else if (bits->signal == MARS_WORKLOAD_SIGNAL_ON) {
-				bits->signal = MARS_WORKLOAD_SIGNAL_OFF;
-				bits->state = MARS_WORKLOAD_STATE_READY;
+			} else if (signal == MARS_WORKLOAD_SIGNAL_ON) {
+				MARS_BITS_SET(bits, SIGNAL,
+					MARS_WORKLOAD_SIGNAL_OFF);
+				MARS_BITS_SET(bits, STATE,
+					MARS_WORKLOAD_STATE_READY);
 				i--;
 			}
 			break;
@@ -148,10 +156,12 @@ static int reserve_block(int block)
 	index = search_block(block);
 	if (index >= 0) {
 		/* update the current state of the workload */
-		queue_block.bits[index].state = MARS_WORKLOAD_STATE_RUNNING;
+		MARS_BITS_SET(&queue_block.bits[index], STATE,
+			MARS_WORKLOAD_STATE_RUNNING);
 
 		/* reset the counter for reserved workload */
-		queue_block.bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
+		MARS_BITS_SET(&queue_block.bits[index], COUNTER,
+			MARS_WORKLOAD_COUNTER_MIN);
 	}
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
@@ -168,7 +178,7 @@ static void release_block(int block, int
 	mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
 
 	/* update current workload state in workload queue block */
-	queue_block.bits[index].state = workload_state;
+	MARS_BITS_SET(&queue_block.bits[index], STATE, workload_state);
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
 }
@@ -191,15 +201,13 @@ int reserve_workload(void)
 
 	/* set global workload info based on workload block and index */
 	workload_index = MARS_WORKLOAD_PER_BLOCK * block + index;
-	workload_type = queue_block.bits[index].type;
+	workload_type = MARS_BITS_GET(&queue_block.bits[index], TYPE);
 	workload_ea = queue_header.context_ea +
 		workload_index * sizeof(struct mars_workload_context);
 
 	/* dma the workload context code into LS from main memory */
-	mars_dma_get_and_wait((void *)&workload,
-			      workload_ea,
-			      sizeof(struct mars_workload_context),
-			      MARS_DMA_TAG);
+	mars_dma_get_and_wait(&workload, workload_ea,
+		sizeof(struct mars_workload_context), MARS_DMA_TAG);
 
 	return MARS_WORKLOAD_RESERVED;
 }
@@ -211,10 +219,8 @@ void release_workload(void)
 
 	/* dma updated workload context back to main memory if not finished */
 	if (workload_state != MARS_WORKLOAD_STATE_FINISHED) {
-		mars_dma_put_and_wait((void *)&workload,
-				      workload_ea,
-				      sizeof(struct mars_workload_context),
-				      MARS_DMA_TAG);
+		mars_dma_put_and_wait(&workload, workload_ea,
+			sizeof(struct mars_workload_context), MARS_DMA_TAG);
 	}
 
 	/* release block reservation */
--- a/src/mpu/kernel/mars_kernel_workload.c
+++ b/src/mpu/kernel/mars_kernel_workload.c
@@ -139,14 +139,15 @@ int workload_schedule(uint16_t workload_
 	mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
 
 	/* make sure workload is in the correct state */
-	if (queue_block.bits[index].state != MARS_WORKLOAD_STATE_FINISHED) {
+	if (MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+		MARS_WORKLOAD_STATE_FINISHED) {
 		mars_mutex_unlock_put(block_ea,
 			(struct mars_mutex *)&queue_block);
 		return MARS_ERROR_STATE;
 	}
 
 	/* get information of workload to schedule */
-	schedule_workload_type = queue_block.bits[index].type;
+	schedule_workload_type = MARS_BITS_GET(&queue_block.bits[index], TYPE);
 	schedule_workload_ea = queue_header.context_ea +
 		workload_id * sizeof(struct mars_workload_context);
 
@@ -170,11 +171,16 @@ int workload_schedule(uint16_t workload_
 		sizeof(struct mars_workload_context),
 		MARS_DMA_TAG);
 
-	queue_block.bits[index].state = MARS_WORKLOAD_STATE_READY;
-	queue_block.bits[index].priority = priority;
-	queue_block.bits[index].counter = MARS_WORKLOAD_COUNTER_MIN;
-	queue_block.bits[index].signal = MARS_WORKLOAD_SIGNAL_OFF;
-	queue_block.bits[index].wait = MARS_WORKLOAD_ID_NONE;
+	MARS_BITS_SET(&queue_block.bits[index], STATE,
+		MARS_WORKLOAD_STATE_READY);
+	MARS_BITS_SET(&queue_block.bits[index], PRIORITY,
+		priority);
+	MARS_BITS_SET(&queue_block.bits[index], COUNTER,
+		MARS_WORKLOAD_COUNTER_MIN);
+	MARS_BITS_SET(&queue_block.bits[index], SIGNAL,
+		MARS_WORKLOAD_SIGNAL_OFF);
+	MARS_BITS_SET(&queue_block.bits[index], WAIT_ID,
+		MARS_WORKLOAD_ID_NONE);
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
 
@@ -196,14 +202,15 @@ int workload_wait(uint16_t workload_id)
 	mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
 
 	/* make sure workload is initialized */
-	if (queue_block.bits[index].state == MARS_WORKLOAD_STATE_NONE) {
+	if (MARS_BITS_GET(&queue_block.bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_NONE) {
 		mars_mutex_unlock_put(block_ea,
 			(struct mars_mutex *)&queue_block);
 		return MARS_ERROR_STATE;
 	}
 
 	/* set the workload id to wait for */
-	queue_block.bits[index].wait = workload_id;
+	MARS_BITS_SET(&queue_block.bits[index], WAIT_ID, workload_id);
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
 
@@ -224,15 +231,17 @@ int workload_try_wait(uint16_t workload_
 
 	mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
 
-	MARS_CHECK_CLEANUP_RET(queue_block.bits[index].state !=
-		MARS_WORKLOAD_STATE_NONE,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+		MARS_WORKLOAD_STATE_NONE,
 		mars_mutex_unlock_put(block_ea,
 			(struct mars_mutex *)&queue_block),
 		MARS_ERROR_STATE);
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
 
-	if (queue_block.bits[index].state != MARS_WORKLOAD_STATE_FINISHED)
+	if (MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+		MARS_WORKLOAD_STATE_FINISHED)
 		return MARS_ERROR_BUSY;
 
 	return MARS_SUCCESS;
@@ -253,14 +262,16 @@ int workload_signal_send(uint16_t worklo
 	mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
 
 	/* make sure workload is initialized */
-	if (queue_block.bits[index].state == MARS_WORKLOAD_STATE_NONE) {
+	if (MARS_BITS_GET(&queue_block.bits[index], STATE) ==
+		MARS_WORKLOAD_STATE_NONE) {
 		mars_mutex_unlock_put(block_ea,
 			(struct mars_mutex *)&queue_block);
 		return MARS_ERROR_STATE;
 	}
 
 	/* set the workload signal */
-	queue_block.bits[index].signal = MARS_WORKLOAD_SIGNAL_ON;
+	MARS_BITS_SET(&queue_block.bits[index], SIGNAL,
+		MARS_WORKLOAD_SIGNAL_ON);
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
 
@@ -295,8 +306,9 @@ int workload_signal_try_wait(void)
 
 	mars_mutex_lock_get(block_ea, (struct mars_mutex *)&queue_block);
 
-	MARS_CHECK_CLEANUP_RET(queue_block.bits[index].state !=
-		MARS_WORKLOAD_STATE_NONE,
+	MARS_CHECK_CLEANUP_RET(
+		MARS_BITS_GET(&queue_block.bits[index], STATE) !=
+		MARS_WORKLOAD_STATE_NONE,
 		mars_mutex_unlock_put(block_ea,
 			(struct mars_mutex *)&queue_block),
 		MARS_ERROR_STATE);
@@ -304,7 +316,8 @@ int workload_signal_try_wait(void)
 
 	mars_mutex_unlock_put(block_ea, (struct mars_mutex *)&queue_block);
 
 	/* return busy if task has not received signal */
-	if (queue_block.bits[index].signal != MARS_WORKLOAD_SIGNAL_ON)
+	if (MARS_BITS_GET(&queue_block.bits[index], SIGNAL) !=
+		MARS_WORKLOAD_SIGNAL_ON)
 		return MARS_ERROR_BUSY;
 
 	return MARS_SUCCESS;