
[net-next,v3,4/9] gve: Add support for dma_mask register

Message ID: 20200908183909.4156744-5-awogbemila@google.com
State: Changes Requested
Delegated to: David Miller
Series: Add GVE Features

Commit Message

David Awogbemila Sept. 8, 2020, 6:39 p.m. UTC
From: Catherine Sullivan <csully@google.com>

Add the dma_mask register and read it to set the DMA masks.

gve_alloc_page() will allocate pages with GFP_DMA32 if priv->dma_mask
is 32. This helps in 32-bit device address cases where the guest would
otherwise run out of SWIOTLB space: since the buffers are allocated
GFP_DMA32, they do not need to be bounced, so there is less pressure
on SWIOTLB.

Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
---
 drivers/net/ethernet/google/gve/gve.h         |  6 ++-
 drivers/net/ethernet/google/gve/gve_main.c    | 40 ++++++++++++-------
 .../net/ethernet/google/gve/gve_register.h    |  3 +-
 3 files changed, 32 insertions(+), 17 deletions(-)

Comments

Jakub Kicinski Sept. 8, 2020, 7:14 p.m. UTC | #1
On Tue,  8 Sep 2020 11:39:04 -0700 David Awogbemila wrote:
> +	dma_mask = readb(&reg_bar->dma_mask);
> +	// Default to 64 if the register isn't set
> +	if (!dma_mask)
> +		dma_mask = 64;
>  	gve_write_version(&reg_bar->driver_version);
>  	/* Get max queues to alloc etherdev */
>  	max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
>  	max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
> +
> +	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));

You use the constant 64, not dma_mask?

You jump through hoops to get GFP_DMA allocations yet you don't set the
right DMA mask. Why would swiotlb become an issue to you if there never
was any reasonable mask set?

> +	if (err) {
> +		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
> +		goto abort_with_reg_bar;
> +	}
> +
> +	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

dma_set_mask_and_coherent()

> +	if (err) {
> +		dev_err(&pdev->dev,
> +			"Failed to set consistent dma mask: err=%d\n", err);
> +		goto abort_with_reg_bar;
> +	}
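
For illustration, here is a minimal sketch (not part of the submitted
patch) of the probe-path change Jakub is suggesting: a single
dma_set_mask_and_coherent() call fed with the register-derived width
instead of the constant 64, reusing the variables from the hunk above.

	dma_mask = readb(&reg_bar->dma_mask);
	if (!dma_mask)
		dma_mask = 64;	/* default when the register is unset */

	/* hypothetical replacement for both pci_set_*_dma_mask() calls */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask));
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
		goto abort_with_reg_bar;
	}
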
David Awogbemila Sept. 9, 2020, 10:12 p.m. UTC | #2
On Tue, Sep 8, 2020 at 12:15 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Tue,  8 Sep 2020 11:39:04 -0700 David Awogbemila wrote:
> > +     dma_mask = readb(&reg_bar->dma_mask);
> > +     // Default to 64 if the register isn't set
> > +     if (!dma_mask)
> > +             dma_mask = 64;
> >       gve_write_version(&reg_bar->driver_version);
> >       /* Get max queues to alloc etherdev */
> >       max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
> >       max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
> > +
> > +     err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
>
> You use the constant 64, not dma_mask?
>
> You jump through hoops to get GFP_DMA allocations yet you don't set the
> right DMA mask. Why would swiotlb become an issue to you if there never
> was any reasonable mask set?
>
> > +     if (err) {
> > +             dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
> > +             goto abort_with_reg_bar;
> > +     }
> > +
> > +     err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
>
> dma_set_mask_and_coherent()
>
> > +     if (err) {
> > +             dev_err(&pdev->dev,
> > +                     "Failed to set consistent dma mask: err=%d\n", err);
> > +             goto abort_with_reg_bar;
> > +     }

I think you're right. Setting the mask with
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) should mean we no
longer have the swiotlb issue. I will drop this patch altogether.
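
For reference, a rough sketch of the end state implied here (keeping
the unconditional 64-bit mask in gve_probe() but switching to the
combined helper); this is an assumption about the follow-up, not the
actual replacement patch:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}
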

Patch

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 55b34b437764..37a3bbced36a 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -232,6 +232,9 @@  struct gve_priv {
 	struct work_struct service_task;
 	unsigned long service_task_flags;
 	unsigned long state_flags;
+
+	/* Gvnic device's dma mask, set during probe. */
+	u8 dma_mask;
 };
 
 enum gve_service_task_flags {
@@ -451,8 +454,7 @@  static inline bool gve_can_recycle_pages(struct net_device *dev)
 }
 
 /* buffers */
-int gve_alloc_page(struct gve_priv *priv, struct device *dev,
-		   struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction);
 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 0fc68f844edf..c69ec044f47c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -518,7 +518,12 @@  int gve_alloc_page(struct gve_priv *priv, struct device *dev,
 		   struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction dir)
 {
-	*page = alloc_page(GFP_KERNEL);
+	gfp_t gfp_flags = GFP_KERNEL;
+
+	if (priv->dma_mask == 32)
+		gfp_flags |= GFP_DMA32;
+
+	*page = alloc_page(gfp_flags);
 	if (!*page) {
 		priv->page_alloc_fail++;
 		return -ENOMEM;
@@ -1083,6 +1088,7 @@  static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	__be32 __iomem *db_bar;
 	struct gve_registers __iomem *reg_bar;
 	struct gve_priv *priv;
+	u8 dma_mask;
 	int err;
 
 	err = pci_enable_device(pdev);
@@ -1095,19 +1101,6 @@  static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
-		goto abort_with_pci_region;
-	}
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_err(&pdev->dev,
-			"Failed to set consistent dma mask: err=%d\n", err);
-		goto abort_with_pci_region;
-	}
-
 	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
 	if (!reg_bar) {
 		dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1122,10 +1115,28 @@  static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto abort_with_reg_bar;
 	}
 
+	dma_mask = readb(&reg_bar->dma_mask);
+	// Default to 64 if the register isn't set
+	if (!dma_mask)
+		dma_mask = 64;
 	gve_write_version(&reg_bar->driver_version);
 	/* Get max queues to alloc etherdev */
 	max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
 	max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
+		goto abort_with_reg_bar;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_err(&pdev->dev,
+			"Failed to set consistent dma mask: err=%d\n", err);
+		goto abort_with_reg_bar;
+	}
+
 	/* Alloc and setup the netdev and priv */
 	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
 	if (!dev) {
@@ -1158,6 +1169,7 @@  static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	priv->db_bar2 = db_bar;
 	priv->service_task_flags = 0x0;
 	priv->state_flags = 0x0;
+	priv->dma_mask = dma_mask;
 
 	gve_set_probe_in_progress(priv);
 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index 84ab8893aadd..fad8813d1bb1 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -16,7 +16,8 @@  struct gve_registers {
 	__be32	adminq_pfn;
 	__be32	adminq_doorbell;
 	__be32	adminq_event_counter;
-	u8	reserved[3];
+	u8	reserved[2];
+	u8	dma_mask;
 	u8	driver_version;
 };