diff mbox series

[1/4] gve: Add support for raw addressing device option

Message ID 20201103174651.590586-2-awogbemila@google.com
State Changes Requested
Delegated to: Netdev Driver Reviewers
Headers show
Series GVE Raw Addressing | expand

Checks

Context Check Description
jkicinski/cover_letter success Link
jkicinski/fixes_present success Link
jkicinski/patch_count success Link
jkicinski/tree_selection success Clearly marked for net-next
jkicinski/subject_prefix success Link
jkicinski/source_inline success Was 0 now: 0
jkicinski/verify_signedoff success Link
jkicinski/module_param success Was 0 now: 0
jkicinski/build_32bit success Errors and warnings before: 0 this patch: 0
jkicinski/kdoc success Errors and warnings before: 0 this patch: 0
jkicinski/verify_fixes success Link
jkicinski/checkpatch warning WARNING: line length of 91 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns
jkicinski/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
jkicinski/header_inline success Link
jkicinski/stable success Stable not CCed

Commit Message

David Awogbemila Nov. 3, 2020, 5:46 p.m. UTC
From: Catherine Sullivan <csully@google.com>

Add support to describe device for parsing device options. As
the first device option, add raw addressing.

"Raw Addressing" mode (as opposed to the current "qpl" mode) is an
operational mode which allows the driver to avoid bounce buffer copies
which it currently performs using pre-allocated qpls (queue_page_lists)
when sending and receiving packets.
For egress packets, the provided skb data addresses will be dma_map'ed and
passed to the device, allowing the NIC to perform DMA directly - the
driver will not have to copy the buffer content into pre-allocated
buffers/qpls (as in qpl mode).
For ingress packets, copies are also eliminated as buffers are handed to
the networking stack and then recycled or re-allocated as
necessary, avoiding the use of skb_copy_to_linear_data().

This patch only introduces the option to the driver.
Subsequent patches will add the ingress and egress functionality.

Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
---
 drivers/net/ethernet/google/gve/gve.h        |  1 +
 drivers/net/ethernet/google/gve/gve_adminq.c | 52 ++++++++++++++++++++
 drivers/net/ethernet/google/gve/gve_adminq.h | 15 ++++--
 drivers/net/ethernet/google/gve/gve_main.c   |  9 ++++
 4 files changed, 73 insertions(+), 4 deletions(-)

Comments

Saeed Mahameed Nov. 3, 2020, 10:43 p.m. UTC | #1
On Tue, 2020-11-03 at 09:46 -0800, David Awogbemila wrote:
> From: Catherine Sullivan <csully@google.com>
> 
> Add support to describe device for parsing device options. As
> the first device option, add raw addressing.
> 
> "Raw Addressing" mode (as opposed to the current "qpl" mode) is an
> operational mode which allows the driver avoid bounce buffer copies
> which it currently performs using pre-allocated qpls
> (queue_page_lists)
> when sending and receiving packets.
> For egress packets, the provided skb data addresses will be
> dma_map'ed and
> passed to the device, allowing the NIC can perform DMA directly - the
> driver will not have to copy the buffer content into pre-allocated
> buffers/qpls (as in qpl mode).
> For ingress packets, copies are also eliminated as buffers are handed
> to
> the networking stack and then recycled or re-allocated as
> necessary, avoiding the use of skb_copy_to_linear_data().
> 
> This patch only introduces the option to the driver.
> Subsequent patches will add the ingress and egress functionality.
> 
> Reviewed-by: Yangchun Fu <yangchun@google.com>
> Signed-off-by: Catherine Sullivan <csully@google.com>
> Signed-off-by: David Awogbemila <awogbemila@google.com>
> ---
>  drivers/net/ethernet/google/gve/gve.h        |  1 +
>  drivers/net/ethernet/google/gve/gve_adminq.c | 52
> ++++++++++++++++++++
>  drivers/net/ethernet/google/gve/gve_adminq.h | 15 ++++--
>  drivers/net/ethernet/google/gve/gve_main.c   |  9 ++++
>  4 files changed, 73 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/google/gve/gve.h
> b/drivers/net/ethernet/google/gve/gve.h
> index f5c80229ea96..80cdae06ee39 100644
> --- a/drivers/net/ethernet/google/gve/gve.h
> +++ b/drivers/net/ethernet/google/gve/gve.h
> @@ -199,6 +199,7 @@ struct gve_priv {
>  	u64 num_registered_pages; /* num pages registered with NIC */
>  	u32 rx_copybreak; /* copy packets smaller than this */
>  	u16 default_num_queues; /* default num queues to set up */
> +	bool raw_addressing; /* true if this dev supports raw
> addressing */
>  
>  	struct gve_queue_config tx_cfg;
>  	struct gve_queue_config rx_cfg;
> diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c
> b/drivers/net/ethernet/google/gve/gve_adminq.c
> index 24ae6a28a806..0b7a2653fe33 100644
> --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> @@ -460,11 +460,14 @@ int gve_adminq_destroy_rx_queues(struct
> gve_priv *priv, u32 num_queues)
>  int gve_adminq_describe_device(struct gve_priv *priv)
>  {
>  	struct gve_device_descriptor *descriptor;
> +	struct gve_device_option *dev_opt;
>  	union gve_adminq_command cmd;
>  	dma_addr_t descriptor_bus;
> +	u16 num_options;
>  	int err = 0;
>  	u8 *mac;
>  	u16 mtu;
> +	int i;
>  
>  	memset(&cmd, 0, sizeof(cmd));
>  	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
> @@ -518,6 +521,55 @@ int gve_adminq_describe_device(struct gve_priv
> *priv)
>  		priv->rx_desc_cnt = priv->rx_pages_per_qpl;
>  	}
>  	priv->default_num_queues = be16_to_cpu(descriptor-
> >default_num_queues);
> +	dev_opt = (void *)(descriptor + 1);
> +
> +	num_options = be16_to_cpu(descriptor->num_device_options);
> +	for (i = 0; i < num_options; i++) {
> +		u16 option_length = be16_to_cpu(dev_opt-
> >option_length);
> +		u16 option_id = be16_to_cpu(dev_opt->option_id);
> +		void *option_end;
> +
> +		option_end = (void *)dev_opt + sizeof(*dev_opt) +
> option_length;
> +		if (option_end > (void *)descriptor +
> be16_to_cpu(descriptor->total_length)) {
> +			dev_err(&priv->dev->dev,
> +				"options exceed device_descriptor's
> total length.\n");
> +			err = -EINVAL;
> +			goto free_device_descriptor;
> +		}
> +
> +		switch (option_id) {
> +		case GVE_DEV_OPT_ID_RAW_ADDRESSING:
> +			/* If the length or feature mask doesn't match,
> +			 * continue without enabling the feature.
> +			 */
> +			if (option_length !=
> GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
> +			    dev_opt->feat_mask !=
> +			    cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRE
> SSING)) {
> +				dev_warn(&priv->pdev->dev,
> +					 "Raw addressing option
> error:\n"
> +					 "	Expected: length=%d,
> feature_mask=%x.\n"
> +					 "	Actual: length=%d,
> feature_mask=%x.\n",
> +					 GVE_DEV_OPT_LEN_RAW_ADDRESSING
> ,
> +					 cpu_to_be32(GVE_DEV_OPT_FEAT_M
> ASK_RAW_ADDRESSING),
> +					 option_length, dev_opt-
> >feat_mask);
> +				priv->raw_addressing = false;
> +			} else {
> +				dev_info(&priv->pdev->dev,
> +					 "Raw addressing device option
> enabled.\n");
> +				priv->raw_addressing = true;
> +			}
> +			break;
> +		default:
> +			/* If we don't recognize the option just
> continue
> +			 * without doing anything.
> +			 */
> +			dev_dbg(&priv->pdev->dev,
> +				"Unrecognized device option 0x%hx not
> enabled.\n",
> +				option_id);
> +			break;
> +		}
> +		dev_opt = (void *)dev_opt + sizeof(*dev_opt) +
> option_length;

This was already calculated above, "option_end"


Suggestion: you can make an iterator macro to return the next opt

next_opt = GET_NEXT_OPT(descriptor, curr_opt);

you can make it check boundaries and return null on last iteration or
when total length is exceeded, and just use it in a more readable
iterator loop.
David Awogbemila Nov. 6, 2020, 7:41 p.m. UTC | #2
On Tue, Nov 3, 2020 at 2:43 PM Saeed Mahameed <saeed@kernel.org> wrote:
>
> On Tue, 2020-11-03 at 09:46 -0800, David Awogbemila wrote:
> > From: Catherine Sullivan <csully@google.com>
> >
> > Add support to describe device for parsing device options. As
> > the first device option, add raw addressing.
> >
> > "Raw Addressing" mode (as opposed to the current "qpl" mode) is an
> > operational mode which allows the driver avoid bounce buffer copies
> > which it currently performs using pre-allocated qpls
> > (queue_page_lists)
> > when sending and receiving packets.
> > For egress packets, the provided skb data addresses will be
> > dma_map'ed and
> > passed to the device, allowing the NIC can perform DMA directly - the
> > driver will not have to copy the buffer content into pre-allocated
> > buffers/qpls (as in qpl mode).
> > For ingress packets, copies are also eliminated as buffers are handed
> > to
> > the networking stack and then recycled or re-allocated as
> > necessary, avoiding the use of skb_copy_to_linear_data().
> >
> > This patch only introduces the option to the driver.
> > Subsequent patches will add the ingress and egress functionality.
> >
> > Reviewed-by: Yangchun Fu <yangchun@google.com>
> > Signed-off-by: Catherine Sullivan <csully@google.com>
> > Signed-off-by: David Awogbemila <awogbemila@google.com>
> > ---
> >  drivers/net/ethernet/google/gve/gve.h        |  1 +
> >  drivers/net/ethernet/google/gve/gve_adminq.c | 52
> > ++++++++++++++++++++
> >  drivers/net/ethernet/google/gve/gve_adminq.h | 15 ++++--
> >  drivers/net/ethernet/google/gve/gve_main.c   |  9 ++++
> >  4 files changed, 73 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/google/gve/gve.h
> > b/drivers/net/ethernet/google/gve/gve.h
> > index f5c80229ea96..80cdae06ee39 100644
> > --- a/drivers/net/ethernet/google/gve/gve.h
> > +++ b/drivers/net/ethernet/google/gve/gve.h
> > @@ -199,6 +199,7 @@ struct gve_priv {
> >       u64 num_registered_pages; /* num pages registered with NIC */
> >       u32 rx_copybreak; /* copy packets smaller than this */
> >       u16 default_num_queues; /* default num queues to set up */
> > +     bool raw_addressing; /* true if this dev supports raw
> > addressing */
> >
> >       struct gve_queue_config tx_cfg;
> >       struct gve_queue_config rx_cfg;
> > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c
> > b/drivers/net/ethernet/google/gve/gve_adminq.c
> > index 24ae6a28a806..0b7a2653fe33 100644
> > --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> > +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> > @@ -460,11 +460,14 @@ int gve_adminq_destroy_rx_queues(struct
> > gve_priv *priv, u32 num_queues)
> >  int gve_adminq_describe_device(struct gve_priv *priv)
> >  {
> >       struct gve_device_descriptor *descriptor;
> > +     struct gve_device_option *dev_opt;
> >       union gve_adminq_command cmd;
> >       dma_addr_t descriptor_bus;
> > +     u16 num_options;
> >       int err = 0;
> >       u8 *mac;
> >       u16 mtu;
> > +     int i;
> >
> >       memset(&cmd, 0, sizeof(cmd));
> >       descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
> > @@ -518,6 +521,55 @@ int gve_adminq_describe_device(struct gve_priv
> > *priv)
> >               priv->rx_desc_cnt = priv->rx_pages_per_qpl;
> >       }
> >       priv->default_num_queues = be16_to_cpu(descriptor-
> > >default_num_queues);
> > +     dev_opt = (void *)(descriptor + 1);
> > +
> > +     num_options = be16_to_cpu(descriptor->num_device_options);
> > +     for (i = 0; i < num_options; i++) {
> > +             u16 option_length = be16_to_cpu(dev_opt-
> > >option_length);
> > +             u16 option_id = be16_to_cpu(dev_opt->option_id);
> > +             void *option_end;
> > +
> > +             option_end = (void *)dev_opt + sizeof(*dev_opt) +
> > option_length;
> > +             if (option_end > (void *)descriptor +
> > be16_to_cpu(descriptor->total_length)) {
> > +                     dev_err(&priv->dev->dev,
> > +                             "options exceed device_descriptor's
> > total length.\n");
> > +                     err = -EINVAL;
> > +                     goto free_device_descriptor;
> > +             }
> > +
> > +             switch (option_id) {
> > +             case GVE_DEV_OPT_ID_RAW_ADDRESSING:
> > +                     /* If the length or feature mask doesn't match,
> > +                      * continue without enabling the feature.
> > +                      */
> > +                     if (option_length !=
> > GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
> > +                         dev_opt->feat_mask !=
> > +                         cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRE
> > SSING)) {
> > +                             dev_warn(&priv->pdev->dev,
> > +                                      "Raw addressing option
> > error:\n"
> > +                                      "      Expected: length=%d,
> > feature_mask=%x.\n"
> > +                                      "      Actual: length=%d,
> > feature_mask=%x.\n",
> > +                                      GVE_DEV_OPT_LEN_RAW_ADDRESSING
> > ,
> > +                                      cpu_to_be32(GVE_DEV_OPT_FEAT_M
> > ASK_RAW_ADDRESSING),
> > +                                      option_length, dev_opt-
> > >feat_mask);
> > +                             priv->raw_addressing = false;
> > +                     } else {
> > +                             dev_info(&priv->pdev->dev,
> > +                                      "Raw addressing device option
> > enabled.\n");
> > +                             priv->raw_addressing = true;
> > +                     }
> > +                     break;
> > +             default:
> > +                     /* If we don't recognize the option just
> > continue
> > +                      * without doing anything.
> > +                      */
> > +                     dev_dbg(&priv->pdev->dev,
> > +                             "Unrecognized device option 0x%hx not
> > enabled.\n",
> > +                             option_id);
> > +                     break;
> > +             }
> > +             dev_opt = (void *)dev_opt + sizeof(*dev_opt) +
> > option_length;
>
> This was already calculated above, "option_end"
>
>
> Suggestion: you can make an iterator macro to return the next opt
>
> next_opt = GET_NEXT_OPT(descriptor, curr_opt);
>
> you can make it check boundaries and return null on last iteration or
> when total length is exceeded, and just use it in a more readable
> iterator loop.
>
Thanks for the suggestion. I will adopt a macro but it'll only return
NULL if the options exceed the boundary - that way we can distinguish
between an error (boundary exceeded) and the last option.
David Awogbemila Nov. 9, 2020, 9:02 p.m. UTC | #3
Actually, I think I'll adopt a helper static inline function - it may
be tidier than a macro.

On Fri, Nov 6, 2020 at 11:41 AM David Awogbemila <awogbemila@google.com> wrote:
>
> On Tue, Nov 3, 2020 at 2:43 PM Saeed Mahameed <saeed@kernel.org> wrote:
> >
> > On Tue, 2020-11-03 at 09:46 -0800, David Awogbemila wrote:
> > > From: Catherine Sullivan <csully@google.com>
> > >
> > > Add support to describe device for parsing device options. As
> > > the first device option, add raw addressing.
> > >
> > > "Raw Addressing" mode (as opposed to the current "qpl" mode) is an
> > > operational mode which allows the driver avoid bounce buffer copies
> > > which it currently performs using pre-allocated qpls
> > > (queue_page_lists)
> > > when sending and receiving packets.
> > > For egress packets, the provided skb data addresses will be
> > > dma_map'ed and
> > > passed to the device, allowing the NIC can perform DMA directly - the
> > > driver will not have to copy the buffer content into pre-allocated
> > > buffers/qpls (as in qpl mode).
> > > For ingress packets, copies are also eliminated as buffers are handed
> > > to
> > > the networking stack and then recycled or re-allocated as
> > > necessary, avoiding the use of skb_copy_to_linear_data().
> > >
> > > This patch only introduces the option to the driver.
> > > Subsequent patches will add the ingress and egress functionality.
> > >
> > > Reviewed-by: Yangchun Fu <yangchun@google.com>
> > > Signed-off-by: Catherine Sullivan <csully@google.com>
> > > Signed-off-by: David Awogbemila <awogbemila@google.com>
> > > ---
> > >  drivers/net/ethernet/google/gve/gve.h        |  1 +
> > >  drivers/net/ethernet/google/gve/gve_adminq.c | 52
> > > ++++++++++++++++++++
> > >  drivers/net/ethernet/google/gve/gve_adminq.h | 15 ++++--
> > >  drivers/net/ethernet/google/gve/gve_main.c   |  9 ++++
> > >  4 files changed, 73 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/drivers/net/ethernet/google/gve/gve.h
> > > b/drivers/net/ethernet/google/gve/gve.h
> > > index f5c80229ea96..80cdae06ee39 100644
> > > --- a/drivers/net/ethernet/google/gve/gve.h
> > > +++ b/drivers/net/ethernet/google/gve/gve.h
> > > @@ -199,6 +199,7 @@ struct gve_priv {
> > >       u64 num_registered_pages; /* num pages registered with NIC */
> > >       u32 rx_copybreak; /* copy packets smaller than this */
> > >       u16 default_num_queues; /* default num queues to set up */
> > > +     bool raw_addressing; /* true if this dev supports raw
> > > addressing */
> > >
> > >       struct gve_queue_config tx_cfg;
> > >       struct gve_queue_config rx_cfg;
> > > diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c
> > > b/drivers/net/ethernet/google/gve/gve_adminq.c
> > > index 24ae6a28a806..0b7a2653fe33 100644
> > > --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> > > +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> > > @@ -460,11 +460,14 @@ int gve_adminq_destroy_rx_queues(struct
> > > gve_priv *priv, u32 num_queues)
> > >  int gve_adminq_describe_device(struct gve_priv *priv)
> > >  {
> > >       struct gve_device_descriptor *descriptor;
> > > +     struct gve_device_option *dev_opt;
> > >       union gve_adminq_command cmd;
> > >       dma_addr_t descriptor_bus;
> > > +     u16 num_options;
> > >       int err = 0;
> > >       u8 *mac;
> > >       u16 mtu;
> > > +     int i;
> > >
> > >       memset(&cmd, 0, sizeof(cmd));
> > >       descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
> > > @@ -518,6 +521,55 @@ int gve_adminq_describe_device(struct gve_priv
> > > *priv)
> > >               priv->rx_desc_cnt = priv->rx_pages_per_qpl;
> > >       }
> > >       priv->default_num_queues = be16_to_cpu(descriptor-
> > > >default_num_queues);
> > > +     dev_opt = (void *)(descriptor + 1);
> > > +
> > > +     num_options = be16_to_cpu(descriptor->num_device_options);
> > > +     for (i = 0; i < num_options; i++) {
> > > +             u16 option_length = be16_to_cpu(dev_opt-
> > > >option_length);
> > > +             u16 option_id = be16_to_cpu(dev_opt->option_id);
> > > +             void *option_end;
> > > +
> > > +             option_end = (void *)dev_opt + sizeof(*dev_opt) +
> > > option_length;
> > > +             if (option_end > (void *)descriptor +
> > > be16_to_cpu(descriptor->total_length)) {
> > > +                     dev_err(&priv->dev->dev,
> > > +                             "options exceed device_descriptor's
> > > total length.\n");
> > > +                     err = -EINVAL;
> > > +                     goto free_device_descriptor;
> > > +             }
> > > +
> > > +             switch (option_id) {
> > > +             case GVE_DEV_OPT_ID_RAW_ADDRESSING:
> > > +                     /* If the length or feature mask doesn't match,
> > > +                      * continue without enabling the feature.
> > > +                      */
> > > +                     if (option_length !=
> > > GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
> > > +                         dev_opt->feat_mask !=
> > > +                         cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRE
> > > SSING)) {
> > > +                             dev_warn(&priv->pdev->dev,
> > > +                                      "Raw addressing option
> > > error:\n"
> > > +                                      "      Expected: length=%d,
> > > feature_mask=%x.\n"
> > > +                                      "      Actual: length=%d,
> > > feature_mask=%x.\n",
> > > +                                      GVE_DEV_OPT_LEN_RAW_ADDRESSING
> > > ,
> > > +                                      cpu_to_be32(GVE_DEV_OPT_FEAT_M
> > > ASK_RAW_ADDRESSING),
> > > +                                      option_length, dev_opt-
> > > >feat_mask);
> > > +                             priv->raw_addressing = false;
> > > +                     } else {
> > > +                             dev_info(&priv->pdev->dev,
> > > +                                      "Raw addressing device option
> > > enabled.\n");
> > > +                             priv->raw_addressing = true;
> > > +                     }
> > > +                     break;
> > > +             default:
> > > +                     /* If we don't recognize the option just
> > > continue
> > > +                      * without doing anything.
> > > +                      */
> > > +                     dev_dbg(&priv->pdev->dev,
> > > +                             "Unrecognized device option 0x%hx not
> > > enabled.\n",
> > > +                             option_id);
> > > +                     break;
> > > +             }
> > > +             dev_opt = (void *)dev_opt + sizeof(*dev_opt) +
> > > option_length;
> >
> > This was already calculated above, "option_end"
> >
> >
> > Suggestion: you can make an iterator macro to return the next opt
> >
> > next_opt = GET_NEXT_OPT(descriptor, curr_opt);
> >
> > you can make it check boundaries and return null on last iteration or
> > when total length is exceeded, and just use it in a more readable
> > iterator loop.
> >
> Thanks for the suggestion. I will adopt a macro but it'll only return
> NULL if the options exceed the boundary - that way we can distinguish
> between an error (boundary exceeded) and the last option.
diff mbox series

Patch

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index f5c80229ea96..80cdae06ee39 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -199,6 +199,7 @@  struct gve_priv {
 	u64 num_registered_pages; /* num pages registered with NIC */
 	u32 rx_copybreak; /* copy packets smaller than this */
 	u16 default_num_queues; /* default num queues to set up */
+	bool raw_addressing; /* true if this dev supports raw addressing */
 
 	struct gve_queue_config tx_cfg;
 	struct gve_queue_config rx_cfg;
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 24ae6a28a806..0b7a2653fe33 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -460,11 +460,14 @@  int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
 	struct gve_device_descriptor *descriptor;
+	struct gve_device_option *dev_opt;
 	union gve_adminq_command cmd;
 	dma_addr_t descriptor_bus;
+	u16 num_options;
 	int err = 0;
 	u8 *mac;
 	u16 mtu;
+	int i;
 
 	memset(&cmd, 0, sizeof(cmd));
 	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -518,6 +521,55 @@  int gve_adminq_describe_device(struct gve_priv *priv)
 		priv->rx_desc_cnt = priv->rx_pages_per_qpl;
 	}
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
+	dev_opt = (void *)(descriptor + 1);
+
+	num_options = be16_to_cpu(descriptor->num_device_options);
+	for (i = 0; i < num_options; i++) {
+		u16 option_length = be16_to_cpu(dev_opt->option_length);
+		u16 option_id = be16_to_cpu(dev_opt->option_id);
+		void *option_end;
+
+		option_end = (void *)dev_opt + sizeof(*dev_opt) + option_length;
+		if (option_end > (void *)descriptor + be16_to_cpu(descriptor->total_length)) {
+			dev_err(&priv->dev->dev,
+				"options exceed device_descriptor's total length.\n");
+			err = -EINVAL;
+			goto free_device_descriptor;
+		}
+
+		switch (option_id) {
+		case GVE_DEV_OPT_ID_RAW_ADDRESSING:
+			/* If the length or feature mask doesn't match,
+			 * continue without enabling the feature.
+			 */
+			if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
+			    dev_opt->feat_mask !=
+			    cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
+				dev_warn(&priv->pdev->dev,
+					 "Raw addressing option error:\n"
+					 "	Expected: length=%d, feature_mask=%x.\n"
+					 "	Actual: length=%d, feature_mask=%x.\n",
+					 GVE_DEV_OPT_LEN_RAW_ADDRESSING,
+					 cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
+					 option_length, dev_opt->feat_mask);
+				priv->raw_addressing = false;
+			} else {
+				dev_info(&priv->pdev->dev,
+					 "Raw addressing device option enabled.\n");
+				priv->raw_addressing = true;
+			}
+			break;
+		default:
+			/* If we don't recognize the option just continue
+			 * without doing anything.
+			 */
+			dev_dbg(&priv->pdev->dev,
+				"Unrecognized device option 0x%hx not enabled.\n",
+				option_id);
+			break;
+		}
+		dev_opt = (void *)dev_opt + sizeof(*dev_opt) + option_length;
+	}
 
 free_device_descriptor:
 	dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 281de8326bc5..af5f586167bd 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -79,12 +79,17 @@  struct gve_device_descriptor {
 
 static_assert(sizeof(struct gve_device_descriptor) == 40);
 
-struct device_option {
-	__be32 option_id;
-	__be32 option_length;
+struct gve_device_option {
+	__be16 option_id;
+	__be16 option_length;
+	__be32 feat_mask;
 };
 
-static_assert(sizeof(struct device_option) == 8);
+static_assert(sizeof(struct gve_device_option) == 8);
+
+#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
+#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
+#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
 
 struct gve_adminq_configure_device_resources {
 	__be64 counter_array;
@@ -111,6 +116,8 @@  struct gve_adminq_unregister_page_list {
 
 static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);
 
+#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
+
 struct gve_adminq_create_tx_queue {
 	__be32 queue_id;
 	__be32 reserved;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 48a433154ce0..70685c10db0e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -678,6 +678,10 @@  static int gve_alloc_qpls(struct gve_priv *priv)
 	int i, j;
 	int err;
 
+	/* Raw addressing means no QPLs */
+	if (priv->raw_addressing)
+		return 0;
+
 	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
 	if (!priv->qpls)
 		return -ENOMEM;
@@ -718,6 +722,10 @@  static void gve_free_qpls(struct gve_priv *priv)
 	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
 	int i;
 
+	/* Raw addressing means no QPLs */
+	if (priv->raw_addressing)
+		return;
+
 	kvfree(priv->qpl_cfg.qpl_id_map);
 
 	for (i = 0; i < num_qpls; i++)
@@ -1078,6 +1086,7 @@  static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 	if (skip_describe_device)
 		goto setup_device;
 
+	priv->raw_addressing = false;
 	/* Get the initial information we need from the device */
 	err = gve_adminq_describe_device(priv);
 	if (err) {