From patchwork Wed Jul 3 17:28:08 2019
X-Patchwork-Submitter: Max Reitz
X-Patchwork-Id: 1127020
From: Max Reitz
To: qemu-block@nongnu.org
Cc: Kevin Wolf, qemu-devel@nongnu.org, Max Reitz
Date: Wed, 3 Jul 2019 19:28:08 +0200
Message-Id: <20190703172813.6868-8-mreitz@redhat.com>
In-Reply-To: <20190703172813.6868-1-mreitz@redhat.com>
References: <20190703172813.6868-1-mreitz@redhat.com>
Subject: [Qemu-devel] [PATCH v2 07/12] iotests: Fix throttling in 030

Currently, TestParallelOps in 030 creates images that are too small for
job throttling to be effective.  This is reflected by the fact that the
test never undoes the throttling.  Increase the image size and undo the
throttling once a job is supposed to complete.  Also, add throttling in
test_overlapping_4; otherwise, its jobs may not actually overlap.
In fact, the error usually emitted here is that node2 simply does not
exist, not that overlapping jobs are not allowed -- the fact that this
test ignores the exact error message and just checks the error class is
something that should be fixed in a follow-up patch.

Signed-off-by: Max Reitz
Tested-by: Andrey Shinkevich
Reviewed-by: Alberto Garcia
---
 tests/qemu-iotests/030 | 32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)
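For reference, the throttling pattern these hunks add boils down to the
following sketch.  It is not part of the patch; it assumes the usual
iotests harness (a setUp() like the one in 030 that launches self.vm and
attaches a backing chain as 'drive0'), and the job id 'stream0' is made
up for illustration:

import iotests

class ThrottleSketch(iotests.QMPTestCase):
    # setUp()/tearDown() creating self.vm and the image chain are omitted
    # here; in 030, TestParallelOps.setUp() provides them.

    def test_throttled_stream(self):
        # Start the job with a low speed limit so that it is still running
        # while the test issues the commands that must conflict with it.
        result = self.vm.qmp('block-stream', device='drive0',
                             job_id='stream0', speed=1024 * 1024)
        self.assert_qmp(result, 'return', {})

        # ... exercise whatever is supposed to fail while 'stream0' runs ...

        # Lift the limit (speed=0 means "unlimited") so the job can finish
        # promptly instead of crawling along at 1 MB/s.
        result = self.vm.qmp('block-job-set-speed', device='stream0', speed=0)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream0')
        self.assert_no_active_block_jobs()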
diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index c6311d1825..2cf8d54dc5 100755
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -154,7 +154,7 @@ class TestSingleDrive(iotests.QMPTestCase):
 class TestParallelOps(iotests.QMPTestCase):
     num_ops = 4 # Number of parallel block-stream operations
     num_imgs = num_ops * 2 + 1
-    image_len = num_ops * 512 * 1024
+    image_len = num_ops * 4 * 1024 * 1024
     imgs = []
 
     def setUp(self):
@@ -176,11 +176,11 @@ class TestParallelOps(iotests.QMPTestCase):
         # Put data into the images we are copying data from
         odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
         for i in range(len(odd_img_indexes)):
-            # Alternate between 256KB and 512KB.
+            # Alternate between 2MB and 4MB.
             # This way jobs will not finish in the same order they were created
-            num_kb = 256 + 256 * (i % 2)
+            num_mb = 2 + 2 * (i % 2)
             qemu_io('-f', iotests.imgfmt,
-                    '-c', 'write -P 0xFF %dk %dk' % (i * 512, num_kb),
+                    '-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
                     self.imgs[odd_img_indexes[i]])
 
         # Attach the drive to the VM
@@ -213,6 +213,10 @@ class TestParallelOps(iotests.QMPTestCase):
             result = self.vm.qmp('block-stream', device=node_name, job_id=job_id, base=self.imgs[i-2], speed=512*1024)
             self.assert_qmp(result, 'return', {})
 
+        for job in pending_jobs:
+            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
+            self.assert_qmp(result, 'return', {})
+
         # Wait for all jobs to be finished.
         while len(pending_jobs) > 0:
             for event in self.vm.get_qmp_events(wait=True):
@@ -260,6 +264,9 @@ class TestParallelOps(iotests.QMPTestCase):
         result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0')
         self.assert_qmp(result, 'error/class', 'GenericError')
 
+        result = self.vm.qmp('block-job-set-speed', device='stream-node4', speed=0)
+        self.assert_qmp(result, 'return', {})
+
         self.wait_until_completed(drive='stream-node4')
         self.assert_no_active_block_jobs()
 
@@ -289,6 +296,9 @@ class TestParallelOps(iotests.QMPTestCase):
         result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0')
         self.assert_qmp(result, 'error/class', 'GenericError')
 
+        result = self.vm.qmp('block-job-set-speed', device='commit-node3', speed=0)
+        self.assert_qmp(result, 'return', {})
+
         self.wait_until_completed(drive='commit-node3')
 
     # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter.
@@ -309,6 +319,9 @@ class TestParallelOps(iotests.QMPTestCase):
         self.assert_qmp(event, 'data/type', 'commit')
         self.assert_qmp_absent(event, 'data/error')
 
+        result = self.vm.qmp('block-job-set-speed', device='commit-drive0', speed=0)
+        self.assert_qmp(result, 'return', {})
+
         result = self.vm.qmp('block-job-complete', device='commit-drive0')
         self.assert_qmp(result, 'return', {})
 
@@ -321,13 +334,18 @@ class TestParallelOps(iotests.QMPTestCase):
         self.assert_no_active_block_jobs()
 
         # Commit from node2 into node0
-        result = self.vm.qmp('block-commit', device='drive0', top=self.imgs[2], base=self.imgs[0])
+        result = self.vm.qmp('block-commit', device='drive0',
+                             top=self.imgs[2], base=self.imgs[0],
+                             speed=1024*1024)
         self.assert_qmp(result, 'return', {})
 
         # Stream from node2 into node4
         result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
         self.assert_qmp(result, 'error/class', 'GenericError')
 
+        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
+        self.assert_qmp(result, 'return', {})
+
         self.wait_until_completed()
         self.assert_no_active_block_jobs()
 
@@ -378,6 +396,10 @@ class TestParallelOps(iotests.QMPTestCase):
         result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024)
         self.assert_qmp(result, 'return', {})
 
+        for job in ['drive0', 'node4']:
+            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
+            self.assert_qmp(result, 'return', {})
+
         # Wait for all jobs to be finished.
         pending_jobs = ['node4', 'drive0']
         while len(pending_jobs) > 0:
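As for the follow-up mentioned in the commit message: the overlap checks
in these tests currently assert only the error class, for example (taken
from the hunk for test_overlapping_4 above):

result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4')
self.assert_qmp(result, 'error/class', 'GenericError')

A stricter follow-up could additionally match the error description,
since QMP error responses carry a 'desc' field next to 'class'.  Sketched
below with a placeholder; the expected wording would have to be taken
from QEMU's actual output rather than invented here:

# Hypothetical stricter check -- replace the placeholder with the
# message QEMU really emits for this case.
self.assert_qmp(result, 'error/desc', '<expected error message>')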