
[11/11] iotests: 124 - transactional failure test

Message ID 1425528911-10300-12-git-send-email-jsnow@redhat.com
State New

Commit Message

John Snow March 5, 2015, 4:15 a.m. UTC
Use a transaction to request an incremental backup across two drives.
Coerce one of the jobs to fail, and then re-run the transaction.

Verify that no bitmap data was lost due to the partial transaction
failure.

Signed-off-by: John Snow <jsnow@redhat.com>
---
 tests/qemu-iotests/124     | 119 +++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/124.out |   4 +-
 2 files changed, 121 insertions(+), 2 deletions(-)
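
For readers who want the QMP shape without digging through the test: the whole
experiment hinges on grouping two 'drive-backup' actions (sync mode
'dirty-bitmap', mode 'existing') into a single 'transaction' command, so that
both incremental backups are requested together. A minimal sketch of that call,
using the iotests VM wrapper the test relies on -- the device ids, bitmap name,
format and target paths below are illustrative only:

    # Minimal sketch: one QMP 'transaction' grouping two incremental backups.
    # Assumes an iotests.VM() instance 'vm' and pre-created target images.
    actions = [
        {'type': 'drive-backup',
         'data': {'device': 'drive0',
                  'sync': 'dirty-bitmap',   # copy only clusters marked dirty
                  'bitmap': 'bitmap0',
                  'format': 'qcow2',
                  'target': '/tmp/drive0.inc0.qcow2',
                  'mode': 'existing'}},     # target was created beforehand
        {'type': 'drive-backup',
         'data': {'device': 'drive1',
                  'sync': 'dirty-bitmap',
                  'bitmap': 'bitmap0',
                  'format': 'qcow2',
                  'target': '/tmp/drive1.inc0.qcow2',
                  'mode': 'existing'}},
    ]
    result = vm.qmp('transaction', actions=actions)
    # The command itself returns {} on success; each backup then runs as an
    # independent block job and finishes with BLOCK_JOB_COMPLETED or
    # BLOCK_JOB_ERROR, which is exactly what the test inspects.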

Comments

Max Reitz March 17, 2015, 8:59 p.m. UTC | #1
On 2015-03-04 at 23:15, John Snow wrote:
> Use a transaction to request an incremental backup across two drives.
> Coerce one of the jobs to fail, and then re-run the transaction.
>
> Verify that no bitmap data was lost due to the partial transaction
> failure.
>
> Signed-off-by: John Snow <jsnow@redhat.com>
> ---
>   tests/qemu-iotests/124     | 119 +++++++++++++++++++++++++++++++++++++++++++++
>   tests/qemu-iotests/124.out |   4 +-
>   2 files changed, 121 insertions(+), 2 deletions(-)
>
> diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
> index 4afdca1..48571a5 100644
> --- a/tests/qemu-iotests/124
> +++ b/tests/qemu-iotests/124
> @@ -331,6 +331,125 @@ class TestIncrementalBackup(iotests.QMPTestCase):
>           self.create_incremental()
>   
>   
> +    def test_transaction_failure(self):
> +        '''Test: Verify backups made from a transaction that partially fails.
> +
> +        Add a second drive with its own unique pattern, and add a bitmap to each
> +        drive. Use blkdebug to interfere with the backup on just one drive and
> +        attempt to create a coherent incremental backup across both drives.
> +
> +        verify a failure in one but not both, then delete the failed stubs and
> +        re-run the same transaction.
> +
> +        verify that both incrementals are created successfully.
> +        '''
> +
> +        # Create a second drive, with pattern:
> +        drive1 = self.add_node('drive1')
> +        self.img_create(drive1['file'], drive1['fmt'])
> +        io_write_patterns(drive1['file'], (('0x14', 0, 512),
> +                                           ('0x5d', '1M', '32k'),
> +                                           ('0xcd', '32M', '124k')))
> +
> +        # Create a blkdebug interface to this img as 'drive1'
> +        result = self.vm.qmp('blockdev-add', options={
> +            'id': drive1['id'],
> +            'driver': drive1['fmt'],
> +            'file': {
> +                'driver': 'blkdebug',
> +                'image': {
> +                    'driver': 'file',
> +                    'filename': drive1['file']
> +                },
> +                'set-state': [{
> +                    'event': 'flush_to_disk',
> +                    'state': 1,
> +                    'new_state': 2
> +                }],
> +                'inject-error': [{
> +                    'event': 'read_aio',
> +                    'errno': 5,
> +                    'state': 2,
> +                    'immediately': False,
> +                    'once': True
> +                }],
> +            }
> +        })
> +        self.assert_qmp(result, 'return', {})
> +
> +        # Create bitmaps and full backups for both drives
> +        drive0 = self.drives[0]
> +        dr0bm0 = self.add_bitmap('bitmap0', drive0)
> +        dr1bm0 = self.add_bitmap('bitmap0', drive1)
> +        self.create_full_backup(drive0)
> +        self.create_full_backup(drive1)
> +        self.assert_no_active_block_jobs()
> +        self.assertFalse(self.vm.get_qmp_events(wait=False))
> +
> +        # Emulate some writes
> +        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
> +                                          ('0xfe', '16M', '256k'),
> +                                          ('0x64', '32736k', '64k')))
> +        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
> +                                          ('0xef', '16M', '256k'),
> +                                          ('0x46', '32736k', '64k')))
> +
> +        # Create incremental backup targets
> +        target0 = self.prepare_backup(dr0bm0)
> +        target1 = self.prepare_backup(dr1bm0)
> +
> +        # Ask for a new incremental backup per-each drive,
> +        # expecting drive1's backup to fail:
> +        transaction = [
> +            {
> +                'type': 'drive-backup',
> +                'data': { 'device': drive0['id'],
> +                          'sync': 'dirty-bitmap',
> +                          'format': drive0['fmt'],
> +                          'target': target0,
> +                          'mode': 'existing',
> +                          'bitmap': dr0bm0.name },
> +            },
> +            {
> +                'type': 'drive-backup',
> +                'data': { 'device': drive1['id'],
> +                          'sync': 'dirty-bitmap',
> +                          'format': drive1['fmt'],
> +                          'target': target1,
> +                          'mode': 'existing',
> +                          'bitmap': dr1bm0.name }
> +            }
> +        ]
> +        result = self.vm.qmp('transaction', actions=transaction)
> +        self.assert_qmp(result, 'return', {})
> +
> +        # Observe that drive0's backup completes, but drive1's does not.
> +        # Consume drive1's error and ensure all pending actions are completed.
> +        self.wait_incremental(dr0bm0, validate=True)
> +        self.wait_incremental(dr1bm0, validate=False)
> +        error = self.vm.event_wait('BLOCK_JOB_ERROR')
> +        self.assert_qmp(error, 'data', {'device': drive1['id'],
> +                                        'action': 'report',
> +                                        'operation': 'read'})
> +        self.assertFalse(self.vm.get_qmp_events(wait=False))
> +        self.assert_no_active_block_jobs()
> +
> +        # Delete drive0's (successful) backup and create two new empty
> +        # targets to re-run the transaction.
> +        dr0bm0.del_target()
> +        target0 = self.prepare_backup(dr0bm0)
> +        target1 = self.prepare_backup(dr1bm0)
> +
> +        # Re-run the exact same transaction.
> +        result = self.vm.qmp('transaction', actions=transaction)
> +        self.assert_qmp(result, 'return', {})
> +        # Both should complete successfully this time.
> +        self.wait_incremental(dr0bm0, 'drive0')
> +        self.wait_incremental(dr1bm0, 'drive1')

s/'drive.'/validate=True/, I think (will lead to the same result, 
though, yay for dynamically typed languages).
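
For reference, the reason the string "works" at all: wait_incremental()
presumably only ever checks its second parameter for truthiness, and any
non-empty string is truthy in Python, so 'drive0' and True end up taking the
same branch. A tiny sketch of the effect (the signature below is assumed, not
the real helper):

    # Assumed shape of the helper; only the truthiness of 'validate' matters.
    def wait_incremental(bitmap, validate=True):
        if validate:
            return 'expect backup for %s to have succeeded' % bitmap
        return 'expect backup for %s to have failed' % bitmap

    print(wait_incremental('bitmap0', validate=True))  # intended spelling
    print(wait_incremental('bitmap0', 'drive0'))       # accidental spelling, same branch
    print(bool('drive0'))                              # True -- hence "same result"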

With that fixed:

Reviewed-by: Max Reitz <mreitz@redhat.com>

Nice!

> +        self.assertFalse(self.vm.get_qmp_events(wait=False))
> +        self.assert_no_active_block_jobs()
> +
> +
>       def test_sync_dirty_bitmap_missing(self):
>           self.assert_no_active_block_jobs()
>           self.files.append(self.err_img)
> diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out
> index 914e373..3f8a935 100644
> --- a/tests/qemu-iotests/124.out
> +++ b/tests/qemu-iotests/124.out
> @@ -1,5 +1,5 @@
> -.....
> +......
>   ----------------------------------------------------------------------
> -Ran 5 tests
> +Ran 6 tests
>   
>   OK
John Snow March 17, 2015, 9:04 p.m. UTC | #2
On 03/17/2015 04:59 PM, Max Reitz wrote:
> On 2015-03-04 at 23:15, John Snow wrote:
>> [...]
>> +        # Both should complete successfully this time.
>> +        self.wait_incremental(dr0bm0, 'drive0')
>> +        self.wait_incremental(dr1bm0, 'drive1')
>
> s/'drive.'/validate=True/, I think (will lead to the same result,
> though, yay for dynamically typed languages).
>

They really let you get away with murder sometimes. Thanks for the catch.

> With that fixed:
>
> Reviewed-by: Max Reitz <mreitz@redhat.com>
>
> Nice!
>
>> [...]

Patch

diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
index 4afdca1..48571a5 100644
--- a/tests/qemu-iotests/124
+++ b/tests/qemu-iotests/124
@@ -331,6 +331,125 @@  class TestIncrementalBackup(iotests.QMPTestCase):
         self.create_incremental()
 
 
+    def test_transaction_failure(self):
+        '''Test: Verify backups made from a transaction that partially fails.
+
+        Add a second drive with its own unique pattern, and add a bitmap to each
+        drive. Use blkdebug to interfere with the backup on just one drive and
+        attempt to create a coherent incremental backup across both drives.
+
+        verify a failure in one but not both, then delete the failed stubs and
+        re-run the same transaction.
+
+        verify that both incrementals are created successfully.
+        '''
+
+        # Create a second drive, with pattern:
+        drive1 = self.add_node('drive1')
+        self.img_create(drive1['file'], drive1['fmt'])
+        io_write_patterns(drive1['file'], (('0x14', 0, 512),
+                                           ('0x5d', '1M', '32k'),
+                                           ('0xcd', '32M', '124k')))
+
+        # Create a blkdebug interface to this img as 'drive1'
+        result = self.vm.qmp('blockdev-add', options={
+            'id': drive1['id'],
+            'driver': drive1['fmt'],
+            'file': {
+                'driver': 'blkdebug',
+                'image': {
+                    'driver': 'file',
+                    'filename': drive1['file']
+                },
+                'set-state': [{
+                    'event': 'flush_to_disk',
+                    'state': 1,
+                    'new_state': 2
+                }],
+                'inject-error': [{
+                    'event': 'read_aio',
+                    'errno': 5,
+                    'state': 2,
+                    'immediately': False,
+                    'once': True
+                }],
+            }
+        })
+        self.assert_qmp(result, 'return', {})
+
+        # Create bitmaps and full backups for both drives
+        drive0 = self.drives[0]
+        dr0bm0 = self.add_bitmap('bitmap0', drive0)
+        dr1bm0 = self.add_bitmap('bitmap0', drive1)
+        self.create_full_backup(drive0)
+        self.create_full_backup(drive1)
+        self.assert_no_active_block_jobs()
+        self.assertFalse(self.vm.get_qmp_events(wait=False))
+
+        # Emulate some writes
+        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
+                                          ('0xfe', '16M', '256k'),
+                                          ('0x64', '32736k', '64k')))
+        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
+                                          ('0xef', '16M', '256k'),
+                                          ('0x46', '32736k', '64k')))
+
+        # Create incremental backup targets
+        target0 = self.prepare_backup(dr0bm0)
+        target1 = self.prepare_backup(dr1bm0)
+
+        # Ask for a new incremental backup per-each drive,
+        # expecting drive1's backup to fail:
+        transaction = [
+            {
+                'type': 'drive-backup',
+                'data': { 'device': drive0['id'],
+                          'sync': 'dirty-bitmap',
+                          'format': drive0['fmt'],
+                          'target': target0,
+                          'mode': 'existing',
+                          'bitmap': dr0bm0.name },
+            },
+            {
+                'type': 'drive-backup',
+                'data': { 'device': drive1['id'],
+                          'sync': 'dirty-bitmap',
+                          'format': drive1['fmt'],
+                          'target': target1,
+                          'mode': 'existing',
+                          'bitmap': dr1bm0.name }
+            }
+        ]
+        result = self.vm.qmp('transaction', actions=transaction)
+        self.assert_qmp(result, 'return', {})
+
+        # Observe that drive0's backup completes, but drive1's does not.
+        # Consume drive1's error and ensure all pending actions are completed.
+        self.wait_incremental(dr0bm0, validate=True)
+        self.wait_incremental(dr1bm0, validate=False)
+        error = self.vm.event_wait('BLOCK_JOB_ERROR')
+        self.assert_qmp(error, 'data', {'device': drive1['id'],
+                                        'action': 'report',
+                                        'operation': 'read'})
+        self.assertFalse(self.vm.get_qmp_events(wait=False))
+        self.assert_no_active_block_jobs()
+
+        # Delete drive0's (successful) backup and create two new empty
+        # targets to re-run the transaction.
+        dr0bm0.del_target()
+        target0 = self.prepare_backup(dr0bm0)
+        target1 = self.prepare_backup(dr1bm0)
+
+        # Re-run the exact same transaction.
+        result = self.vm.qmp('transaction', actions=transaction)
+        self.assert_qmp(result, 'return', {})
+        # Both should complete successfully this time.
+        self.wait_incremental(dr0bm0, 'drive0')
+        self.wait_incremental(dr1bm0, 'drive1')
+        self.assertFalse(self.vm.get_qmp_events(wait=False))
+        self.assert_no_active_block_jobs()
+
+
     def test_sync_dirty_bitmap_missing(self):
         self.assert_no_active_block_jobs()
         self.files.append(self.err_img)
diff --git a/tests/qemu-iotests/124.out b/tests/qemu-iotests/124.out
index 914e373..3f8a935 100644
--- a/tests/qemu-iotests/124.out
+++ b/tests/qemu-iotests/124.out
@@ -1,5 +1,5 @@ 
-.....
+......
 ----------------------------------------------------------------------
-Ran 5 tests
+Ran 6 tests
 
 OK