
[SRU,G/aws,2/2] PM: hibernate: Batch hibernate and resume IO requests

Message ID 20201104120554.944255-3-andrea.righi@canonical.com
State New
Series aws: update patch to batch hibernate and resume IO requests

Commit Message

Andrea Righi Nov. 4, 2020, 12:05 p.m. UTC
From: Xiaoyi Chen <cxiaoyi@amazon.com>

BugLink: https://bugs.launchpad.net/bugs/1902864

The hibernate and resume process submits individual IO requests for each
page of the data, so use blk_plug to improve the batching of these
requests.
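
For reference (not part of the patch), the blk_plug pattern relied on
here is sketched below; submit_batched() and submit_one_page() are
hypothetical names used only for illustration, while blk_start_plug()
and blk_finish_plug() are the block layer interfaces used in the diff:

	#include <linux/blkdev.h>

	/*
	 * Sketch only: submit_one_page() is a hypothetical stand-in for
	 * the per-page submit_bio() calls made from kernel/power/swap.c.
	 */
	static void submit_one_page(unsigned int page_nr);

	static void submit_batched(unsigned int nr_pages)
	{
		struct blk_plug plug;
		unsigned int i;

		/* Start collecting this task's bios on a per-task plug list. */
		blk_start_plug(&plug);

		for (i = 0; i < nr_pages; i++)
			submit_one_page(i);	/* each bio is queued on the plug */

		/* Flush the plug list, letting the block layer merge requests. */
		blk_finish_plug(&plug);
	}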

Testing this change with hibernate and resume consistently shows merging
of the IO requests, and a more than order-of-magnitude improvement in
hibernate and resume speed is observed.

One hibernate and resume cycle for 16GB RAM out of 32GB in use takes
around 21 minutes before the change, and 1 minute after the change on
a system with limited storage IOPS.

Signed-off-by: Xiaoyi Chen <cxiaoyi@amazon.com>
Co-Developed-by: Anchal Agarwal <anchalag@amazon.com>
Signed-off-by: Anchal Agarwal <anchalag@amazon.com>
[ rjw: Subject and changelog edits, white space damage fixes ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
(cherry picked from commit 55c4478a8f0ecedc0c1a0c9379380249985c372a)
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
---
 kernel/power/swap.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

Patch

diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 01e2858b5fe3..116320a0394d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -226,6 +226,7 @@  struct hib_bio_batch {
 	atomic_t		count;
 	wait_queue_head_t	wait;
 	blk_status_t		error;
+	struct blk_plug		plug;
 };
 
 static void hib_init_batch(struct hib_bio_batch *hb)
@@ -233,6 +234,12 @@  static void hib_init_batch(struct hib_bio_batch *hb)
 	atomic_set(&hb->count, 0);
 	init_waitqueue_head(&hb->wait);
 	hb->error = BLK_STS_OK;
+	blk_start_plug(&hb->plug);
+}
+
+static void hib_finish_batch(struct hib_bio_batch *hb)
+{
+	blk_finish_plug(&hb->plug);
 }
 
 static void hib_end_io(struct bio *bio)
@@ -294,6 +301,10 @@  static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 
 static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 {
+	/*
+	 * We are relying on the behavior of blk_plug that a thread with
+	 * a plug will flush the plug list before sleeping.
+	 */
 	wait_event(hb->wait, atomic_read(&hb->count) == 0);
 	return blk_status_to_errno(hb->error);
 }
@@ -561,6 +572,7 @@  static int save_image(struct swap_map_handle *handle,
 		nr_pages++;
 	}
 	err2 = hib_wait_io(&hb);
+	hib_finish_batch(&hb);
 	stop = ktime_get();
 	if (!ret)
 		ret = err2;
@@ -854,6 +866,7 @@  static int save_image_lzo(struct swap_map_handle *handle,
 		pr_info("Image saving done\n");
 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 out_clean:
+	hib_finish_batch(&hb);
 	if (crc) {
 		if (crc->thr)
 			kthread_stop(crc->thr);
@@ -1084,6 +1097,7 @@  static int load_image(struct swap_map_handle *handle,
 		nr_pages++;
 	}
 	err2 = hib_wait_io(&hb);
+	hib_finish_batch(&hb);
 	stop = ktime_get();
 	if (!ret)
 		ret = err2;
@@ -1447,6 +1461,7 @@  static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	swsusp_show_speed(start, stop, nr_to_read, "Read");
 out_clean:
+	hib_finish_batch(&hb);
 	for (i = 0; i < ring_size; i++)
 		free_page((unsigned long)page[i]);
 	if (crc) {