[kernel,v3,06/22] powerpc/powernv: Detach npu struct from pnv_phb

Message ID 20181113082823.2440-7-aik@ozlabs.ru
State Superseded
Series powerpc/powernv/npu, vfio: NVIDIA V100 + P9 passthrough

Commit Message

Alexey Kardashevskiy Nov. 13, 2018, 8:28 a.m. UTC
The powernv PCI code stores NPU data in the pnv_phb struct. The latter
is referenced by pci_controller::private_data. We are going to have NPU2
support in the pseries platform as well, but it does not store any
private_data in the pci_controller struct; and even if it did,
it would be a different data structure.

This adds a global list of NPUs so each platform can register and use
these in the same fashion.
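
As a rough sketch of what such registration could look like from a second
platform (the helper below and any pseries call site are hypothetical, not
part of this patch; powernv registers directly in pnv_npu2_init()):

	/* Hypothetical registration helper; the npu2_devices list itself
	 * is real (see npu-dma.c below), the function is illustrative only. */
	static void npu2_register(struct npu *npu, struct pci_controller *hose)
	{
		npu->hose = hose;
		list_add(&npu->next, &npu2_devices);
	}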

As npdev_to_npu() may now fail, this checks the returned pointer.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
 arch/powerpc/platforms/powernv/pci.h     | 16 -----
 arch/powerpc/platforms/powernv/npu-dma.c | 78 ++++++++++++++++++++----
 2 files changed, 65 insertions(+), 29 deletions(-)

Comments

Alistair Popple Nov. 14, 2018, 4:28 a.m. UTC | #1
Hi Alexey,

On Tuesday, 13 November 2018 7:28:07 PM AEDT Alexey Kardashevskiy wrote:
>  static struct npu *npdev_to_npu(struct pci_dev *npdev)
>  {
> -	struct pnv_phb *nphb;
> +	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
> +	struct npu *npu;
> 
> -	nphb = pci_bus_to_host(npdev->bus)->private_data;
> +	list_for_each_entry(npu, &npu2_devices, next)

This is called from the ATSD path, which is (or at least has been) quite a
performance-critical path, so searching through all the NPUs in a list may be
problematic.

I guess currently it won't make any practical difference as we only ever have 2
NPUs, but in future they may get divided into more logical NPUs. Would it be
possible to store a back-pointer somewhere so we can avoid the lookup?
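
One hypothetical shape for such a back-pointer (the npu field on
pci_controller below is invented for illustration; it would be set once at
registration time):

	/* Sketch only: cache the npu on the controller so the hot ATSD
	 * path becomes a constant-time dereference instead of a list walk.
	 * pci_controller has no such field today. */
	static struct npu *npdev_to_npu(struct pci_dev *npdev)
	{
		struct pci_controller *hose = pci_bus_to_host(npdev->bus);

		return hose->npu;
	}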

> +		if (hose == npu->hose)
> +			return npu;
> 
> -	return &nphb->npu;
> +	WARN_ON_ONCE(1);
> +	return NULL;
>  }
> 
>  /* Maximum number of nvlinks per npu */
> @@ -505,6 +531,9 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
>  				continue;
> 
>  			npu = npdev_to_npu(npdev);
> +			if (!npu)
> +				continue;
> +
>  			mmio_atsd_reg[i].npu = npu;
>  			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
>  			while (mmio_atsd_reg[i].reg < 0) {
> @@ -701,6 +730,8 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
> 
>  	nphb = pci_bus_to_host(npdev->bus)->private_data;
>  	npu = npdev_to_npu(npdev);
> +	if (!npu)
> +		return ERR_PTR(-ENODEV);
> 
>  	/*
>  	 * Setup the NPU context table for a particular GPU. These need to be
> @@ -821,6 +852,8 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
> 
>  	nphb = pci_bus_to_host(npdev->bus)->private_data;
>  	npu = npdev_to_npu(npdev);
> +	if (!npu)
> +		return;
>  	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
>  	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
>  							&nvlink_index)))
> @@ -898,9 +931,15 @@ int pnv_npu2_init(struct pnv_phb *phb)
>  	struct pci_dev *gpdev;
>  	static int npu_index;
>  	uint64_t rc = 0;
> +	struct pci_controller *hose = phb->hose;
> +	struct npu *npu;
> +	int ret;
> 
> -	phb->npu.nmmu_flush =
> -		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
> +	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
> +	if (!npu)
> +		return -ENOMEM;
> +
> +	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
>  	for_each_child_of_node(phb->hose->dn, dn) {
>  		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
>  		if (gpdev) {
> @@ -914,18 +953,31 @@ int pnv_npu2_init(struct pnv_phb *phb)
>  		}
>  	}
> 
> -	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
> +	for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
>  							i, &mmio_atsd); i++)
> -		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
> +		npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
> 
> -	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
> -	phb->npu.mmio_atsd_count = i;
> -	phb->npu.mmio_atsd_usage = 0;
> +	pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
> +	npu->mmio_atsd_count = i;
> +	npu->mmio_atsd_usage = 0;
>  	npu_index++;
> -	if (WARN_ON(npu_index >= NV_MAX_NPUS))
> -		return -ENOSPC;
> +	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
> +		ret = -ENOSPC;
> +		goto fail_exit;
> +	}
>  	max_npu2_index = npu_index;
> -	phb->npu.index = npu_index;
> +	npu->index = npu_index;
> +	npu->hose = hose;
> +
> +	list_add(&npu->next, &npu2_devices);

Guess we don't need any locking here as the list gets set up once during boot,
long before the driver is loaded, and is never modified, right?

- Alistair

>  	return 0;
> +
> +fail_exit:
> +	for (i = 0; i < npu->mmio_atsd_count; ++i)
> +		iounmap(npu->mmio_atsd_regs[i]);
> +
> +	kfree(npu);
> +
> +	return ret;
>  }
Alexey Kardashevskiy Nov. 19, 2018, 7:18 a.m. UTC | #2
On 14/11/2018 15:28, Alistair Popple wrote:
> Hi Alexey,
> 
> On Tuesday, 13 November 2018 7:28:07 PM AEDT Alexey Kardashevskiy wrote:
>>  static struct npu *npdev_to_npu(struct pci_dev *npdev)
>>  {
>> -	struct pnv_phb *nphb;
>> +	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
>> +	struct npu *npu;
>>
>> -	nphb = pci_bus_to_host(npdev->bus)->private_data;
>> +	list_for_each_entry(npu, &npu2_devices, next)
> 
> This is called from the ATSD path, which is (or at least has been) quite a
> performance-critical path, so searching through all the NPUs in a list may be
> problematic.
> 
> I guess currently it won't make any practical difference as we only ever have 2
> NPUs, but in future they may get divided into more logical NPUs. Would it be
> possible to store a back-pointer somewhere so we can avoid the lookup?


It is quite possible even now with iommu_group_get() + container_of() +
iommu_group_put(); I'll try that in the respin.
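
Roughly, as a sketch only (it assumes the platform stores a structure
embedding the npu as the group's iommudata, e.g. the hypothetical
table_group member below, which is not in this patch):

	static struct npu *npdev_to_npu(struct pci_dev *npdev)
	{
		struct iommu_group *grp = iommu_group_get(&npdev->dev);
		struct iommu_table_group *tgrp;
		struct npu *npu = NULL;

		if (!grp)
			return NULL;

		tgrp = iommu_group_get_iommudata(grp);
		if (tgrp)
			npu = container_of(tgrp, struct npu, table_group);
		iommu_group_put(grp);

		return npu;
	}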


> 
>> +		if (hose == npu->hose)
>> +			return npu;
>>
>> -	return &nphb->npu;
>> +	WARN_ON_ONCE(1);
>> +	return NULL;
>>  }
>>
>>  /* Maximum number of nvlinks per npu */
>> @@ -505,6 +531,9 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
>>  				continue;
>>
>>  			npu = npdev_to_npu(npdev);
>> +			if (!npu)
>> +				continue;
>> +
>>  			mmio_atsd_reg[i].npu = npu;
>>  			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
>>  			while (mmio_atsd_reg[i].reg < 0) {
>> @@ -701,6 +730,8 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
>>
>>  	nphb = pci_bus_to_host(npdev->bus)->private_data;
>>  	npu = npdev_to_npu(npdev);
>> +	if (!npu)
>> +		return ERR_PTR(-ENODEV);
>>
>>  	/*
>>  	 * Setup the NPU context table for a particular GPU. These need to be
>> @@ -821,6 +852,8 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
>>
>>  	nphb = pci_bus_to_host(npdev->bus)->private_data;
>>  	npu = npdev_to_npu(npdev);
>> +	if (!npu)
>> +		return;
>>  	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
>>  	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
>>  							&nvlink_index)))
>> @@ -898,9 +931,15 @@ int pnv_npu2_init(struct pnv_phb *phb)
>>  	struct pci_dev *gpdev;
>>  	static int npu_index;
>>  	uint64_t rc = 0;
>> +	struct pci_controller *hose = phb->hose;
>> +	struct npu *npu;
>> +	int ret;
>>
>> -	phb->npu.nmmu_flush =
>> -		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
>> +	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
>> +	if (!npu)
>> +		return -ENOMEM;
>> +
>> +	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
>>  	for_each_child_of_node(phb->hose->dn, dn) {
>>  		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
>>  		if (gpdev) {
>> @@ -914,18 +953,31 @@ int pnv_npu2_init(struct pnv_phb *phb)
>>  		}
>>  	}
>>
>> -	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
>> +	for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
>>  							i, &mmio_atsd); i++)
>> -		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
>> +		npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
>>
>> -	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
>> -	phb->npu.mmio_atsd_count = i;
>> -	phb->npu.mmio_atsd_usage = 0;
>> +	pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
>> +	npu->mmio_atsd_count = i;
>> +	npu->mmio_atsd_usage = 0;
>>  	npu_index++;
>> -	if (WARN_ON(npu_index >= NV_MAX_NPUS))
>> -		return -ENOSPC;
>> +	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
>> +		ret = -ENOSPC;
>> +		goto fail_exit;
>> +	}
>>  	max_npu2_index = npu_index;
>> -	phb->npu.index = npu_index;
>> +	npu->index = npu_index;
>> +	npu->hose = hose;
>> +
>> +	list_add(&npu->next, &npu2_devices);
> 
> Guess we don't need any locking here as the list gets set up once during boot,
> long before the driver is loaded, and is never modified, right?


Correct.
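
(Purely illustrative: if the list were ever modified after boot, e.g. for
hotplug, registration and the lookup walk would need protection along these
lines; not needed while the above holds:)

	static DEFINE_MUTEX(npu2_devices_lock);

	static void npu2_register(struct npu *npu)
	{
		mutex_lock(&npu2_devices_lock);
		list_add(&npu->next, &npu2_devices);
		mutex_unlock(&npu2_devices_lock);
	}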


> 
> - Alistair
> 
>>  	return 0;
>> +
>> +fail_exit:
>> +	for (i = 0; i < npu->mmio_atsd_count; ++i)
>> +		iounmap(npu->mmio_atsd_regs[i]);
>> +
>> +	kfree(npu);
>> +
>> +	return ret;
>>  }
> 
>

Patch

diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 2131373..f2d50974 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -8,9 +8,6 @@ 
 
 struct pci_dn;
 
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-
 enum pnv_phb_type {
 	PNV_PHB_IODA1		= 0,
 	PNV_PHB_IODA2		= 1,
@@ -176,19 +173,6 @@  struct pnv_phb {
 	unsigned int		diag_data_size;
 	u8			*diag_data;
 
-	/* Nvlink2 data */
-	struct npu {
-		int index;
-		__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
-		unsigned int mmio_atsd_count;
-
-		/* Bitmask for MMIO register usage */
-		unsigned long mmio_atsd_usage;
-
-		/* Do we need to explicitly flush the nest mmu? */
-		bool nmmu_flush;
-	} npu;
-
 	int p2p_target_count;
 };
 
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 9f48831..9fc4e4e 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -330,13 +330,39 @@  struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
 /*
  * NPU2 ATS
  */
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+
+/* An NPU descriptor, valid for POWER9 only */
+struct npu {
+	int index;
+	__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
+	unsigned int mmio_atsd_count;
+
+	/* Bitmask for MMIO register usage */
+	unsigned long mmio_atsd_usage;
+
+	/* Do we need to explicitly flush the nest mmu? */
+	bool nmmu_flush;
+
+	struct list_head next;
+
+	struct pci_controller *hose;
+};
+
+static LIST_HEAD(npu2_devices);
+
 static struct npu *npdev_to_npu(struct pci_dev *npdev)
 {
-	struct pnv_phb *nphb;
+	struct pci_controller *hose = pci_bus_to_host(npdev->bus);
+	struct npu *npu;
 
-	nphb = pci_bus_to_host(npdev->bus)->private_data;
+	list_for_each_entry(npu, &npu2_devices, next)
+		if (hose == npu->hose)
+			return npu;
 
-	return &nphb->npu;
+	WARN_ON_ONCE(1);
+	return NULL;
 }
 
 /* Maximum number of nvlinks per npu */
@@ -505,6 +531,9 @@  static void acquire_atsd_reg(struct npu_context *npu_context,
 				continue;
 
 			npu = npdev_to_npu(npdev);
+			if (!npu)
+				continue;
+
 			mmio_atsd_reg[i].npu = npu;
 			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
 			while (mmio_atsd_reg[i].reg < 0) {
@@ -701,6 +730,8 @@  struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 
 	nphb = pci_bus_to_host(npdev->bus)->private_data;
 	npu = npdev_to_npu(npdev);
+	if (!npu)
+		return ERR_PTR(-ENODEV);
 
 	/*
 	 * Setup the NPU context table for a particular GPU. These need to be
@@ -821,6 +852,8 @@  void pnv_npu2_destroy_context(struct npu_context *npu_context,
 
 	nphb = pci_bus_to_host(npdev->bus)->private_data;
 	npu = npdev_to_npu(npdev);
+	if (!npu)
+		return;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
@@ -898,9 +931,15 @@  int pnv_npu2_init(struct pnv_phb *phb)
 	struct pci_dev *gpdev;
 	static int npu_index;
 	uint64_t rc = 0;
+	struct pci_controller *hose = phb->hose;
+	struct npu *npu;
+	int ret;
 
-	phb->npu.nmmu_flush =
-		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
+	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
+	if (!npu)
+		return -ENOMEM;
+
+	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
 	for_each_child_of_node(phb->hose->dn, dn) {
 		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
 		if (gpdev) {
@@ -914,18 +953,31 @@  int pnv_npu2_init(struct pnv_phb *phb)
 		}
 	}
 
-	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
+	for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
 							i, &mmio_atsd); i++)
-		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+		npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
 
-	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
-	phb->npu.mmio_atsd_count = i;
-	phb->npu.mmio_atsd_usage = 0;
+	pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
+	npu->mmio_atsd_count = i;
+	npu->mmio_atsd_usage = 0;
 	npu_index++;
-	if (WARN_ON(npu_index >= NV_MAX_NPUS))
-		return -ENOSPC;
+	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
+		ret = -ENOSPC;
+		goto fail_exit;
+	}
 	max_npu2_index = npu_index;
-	phb->npu.index = npu_index;
+	npu->index = npu_index;
+	npu->hose = hose;
+
+	list_add(&npu->next, &npu2_devices);
 
 	return 0;
+
+fail_exit:
+	for (i = 0; i < npu->mmio_atsd_count; ++i)
+		iounmap(npu->mmio_atsd_regs[i]);
+
+	kfree(npu);
+
+	return ret;
 }