[2/2] iommu/dmar: catch early fault occurrences

Message ID 20191015151112.17225-3-volchkov@amazon.de
State Not Applicable
Series iommu/dmar: expose fault counters via sysfs

Commit Message

Yuri Volchkov Oct. 15, 2019, 3:11 p.m. UTC
The first DMAR faults can happen even before Linux has scanned the
PCI bus. In that case, the fault worker has no chance to look up the
corresponding 'struct pci_dev'.

This commit defers processing of such faults until intel_iommu_init()
has been called. At that point, PCI devices will already be
initialized.

Signed-off-by: Yuri Volchkov <volchkov@amazon.de>
---
 drivers/iommu/dmar.c        | 51 ++++++++++++++++++++++++++++++++++++-
 drivers/iommu/intel-iommu.c |  1 +
 include/linux/intel-iommu.h |  1 +
 3 files changed, 52 insertions(+), 1 deletion(-)
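
Reviewer note: the core of the change is a double-checked 'is_active'
flag guarding a mutex-protected backlog list. Below is a minimal
standalone sketch of the same pattern using pthreads; all names
(struct event, event_work, drain_backlog) are illustrative and are not
the kernel APIs used in the patch. Builds with 'cc -pthread'.

/*
 * Standalone illustration of the deferral pattern: events that arrive
 * before initialization are parked on a backlog list and drained once
 * init completes. Not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int id;
};

static struct {
	struct event *head;
	pthread_mutex_t lock;
	bool is_active;	/* true until init has finished */
} backlog = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.is_active = true,
};

static void handle_event(struct event *ev)
{
	printf("handling event %d\n", ev->id);
	free(ev);
}

/* Mirrors dmar_fault_handle_work(): park early events, handle late ones. */
static void event_work(struct event *ev)
{
	if (backlog.is_active) {
		pthread_mutex_lock(&backlog.lock);
		/* Re-check under the lock: init may have just finished. */
		if (backlog.is_active) {
			ev->next = backlog.head;
			backlog.head = ev;
			pthread_mutex_unlock(&backlog.lock);
			return;
		}
		pthread_mutex_unlock(&backlog.lock);
	}
	handle_event(ev);
}

/* Mirrors dmar_process_deferred_faults(): drain once, then go direct. */
static void drain_backlog(void)
{
	struct event *ev, *next;

	pthread_mutex_lock(&backlog.lock);
	for (ev = backlog.head; ev; ev = next) {
		next = ev->next;
		handle_event(ev);
	}
	backlog.head = NULL;
	backlog.is_active = false;
	pthread_mutex_unlock(&backlog.lock);
}

int main(void)
{
	struct event *early = calloc(1, sizeof(*early));

	early->id = 1;
	event_work(early);	/* parked: init not done yet */
	drain_backlog();	/* handled here */

	struct event *late = calloc(1, sizeof(*late));
	late->id = 2;
	event_work(late);	/* handled immediately */
	return 0;
}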

Patch

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 0749873e3e41..8db2af3de29f 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1674,7 +1674,10 @@  void dmar_msi_read(int irq, struct msi_msg *msg)
 }
 
 struct dmar_fault_info {
-	struct work_struct work;
+	union {
+		struct work_struct work;
+		struct list_head backlog_list;
+	};
 	struct intel_iommu *iommu;
 	int type;
 	int pasid;
@@ -1757,12 +1760,58 @@  static int dmar_fault_handle_one(struct dmar_fault_info *info)
 	return 0;
 }
 
+struct fault_backlog {
+	struct list_head queue;
+	struct mutex lock;
+	bool is_active;
+};
+
+static struct fault_backlog fault_backlog = {
+	.queue = LIST_HEAD_INIT(fault_backlog.queue),
+	.lock = __MUTEX_INITIALIZER(fault_backlog.lock),
+	.is_active = true,
+};
+
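+/*
+ * Called once from intel_iommu_init(), after the PCI bus has been
+ * scanned: drains faults that arrived too early, then deactivates
+ * the backlog so subsequent faults are handled directly.
+ */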
+void dmar_process_deferred_faults(void)
+{
+	struct dmar_fault_info *info, *tmp;
+
+	mutex_lock(&fault_backlog.lock);
+	WARN_ON(!fault_backlog.is_active);
+
+	list_for_each_entry_safe(info, tmp, &fault_backlog.queue,
+				 backlog_list) {
+		dmar_fault_handle_one(info);
+		list_del(&info->backlog_list);
+		free_dmar_fault_info(info);
+	}
+	fault_backlog.is_active = false;
+	mutex_unlock(&fault_backlog.lock);
+}
+
 static void dmar_fault_handle_work(struct work_struct *work)
 {
 	struct dmar_fault_info *info;
 
 	info = container_of(work, struct dmar_fault_info, work);
 
+	if (fault_backlog.is_active) {
+		/* Postpone handling until PCI devices have
+		 * been initialized.
+		 */
+
+		mutex_lock(&fault_backlog.lock);
+		if (!fault_backlog.is_active) {
+			mutex_unlock(&fault_backlog.lock);
+			goto process;
+		}
+
+		list_add(&info->backlog_list, &fault_backlog.queue);
+		mutex_unlock(&fault_backlog.lock);
+		return;
+	}
+
+process:
 	dmar_fault_handle_one(info);
 	free_dmar_fault_info(info);
 }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 3f974919d3bd..a97c05fac5e9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5041,6 +5041,7 @@  int __init intel_iommu_init(void)
 		iommu_disable_protect_mem_regions(iommu);
 	}
 	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+	dmar_process_deferred_faults();
 
 	intel_iommu_enabled = 1;
 	intel_iommu_debugfs_init();
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f8963c833fb0..480a31b41263 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -649,6 +649,7 @@  extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
+extern void dmar_process_deferred_faults(void);
 
 void *alloc_pgtable_page(int node);
 void free_pgtable_page(void *vaddr);