diff mbox series

[v2,net-next,1/2] ibmvnic: Update reset infrastructure to support tunable parameters

Message ID cbaefef3-fa1d-a2ff-3d23-a4b0a3c67050@linux.vnet.ibm.com
State Changes Requested, archived
Delegated to: David Miller
Headers show
Series ibmvnic: Tunable parameter support | expand

Commit Message

John Allen Oct. 23, 2017, 4:33 p.m. UTC
Update ibmvnic reset infrastructure to include a new reset option that will
allow changing of tunable parameters. There currently is no way to request
different capabilities from the vnic server on the fly so this patch
achieves this by resetting the driver and attempting to log in with the
requested changes. If the reset operation fails, the old values of the
tunable parameters are stored in the "fallback" struct and we attempt to
login with the fallback values.

Signed-off-by: John Allen <jallen@linux.vnet.ibm.com>
---

Comments

Nathan Fontenot Oct. 24, 2017, 4:04 p.m. UTC | #1
On 10/23/2017 11:33 AM, John Allen wrote:
> Update ibmvnic reset infrastructure to include a new reset option that will
> allow changing of tunable parameters. There currently is no way to request
> different capabilities from the vnic server on the fly so this patch
> achieves this by resetting the driver and attempting to log in with the
> requested changes. If the reset operation fails, the old values of the
> tunable parameters are stored in the "fallback" struct and we attempt to
> login with the fallback values.
> 
> Signed-off-by: John Allen <jallen@linux.vnet.ibm.com>
> ---
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
> index 11eba82..c2c4a5b 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.c
> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
> @@ -115,6 +115,7 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
>  static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
>  static int ibmvnic_init(struct ibmvnic_adapter *);
>  static void release_crq_queue(struct ibmvnic_adapter *);
> +static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
> 
>  struct ibmvnic_stat {
>  	char name[ETH_GSTRING_LEN];
> @@ -926,6 +927,11 @@ static int ibmvnic_open(struct net_device *netdev)
> 
>  	mutex_lock(&adapter->reset_lock);
> 
> +	if (adapter->desired->mac) {
> +		__ibmvnic_set_mac(netdev, adapter->desired->mac);
> +		adapter->desired->mac = NULL;

Not sure if I missed it, but setting this to NULL doesn't seem right.

This is allocated in ibmvnic_probe() but only gets any data in ibmvnic_set_mac()
if the adapter state is closed. I think the scenario of probe -> set mac addr ->
open -> close -> set mac addr,  would fail in the second set mac address because
the mac struct pointer is NULL.

> +	}
> +
>  	if (adapter->state != VNIC_CLOSED) {
>  		rc = ibmvnic_login(netdev);
>  		if (rc) {
> @@ -1426,7 +1432,7 @@ static void ibmvnic_set_multi(struct net_device *netdev)
>  	}
>  }
> 
> -static int ibmvnic_set_mac(struct net_device *netdev, void *p)
> +static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
>  {
>  	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
>  	struct sockaddr *addr = p;
> @@ -1444,6 +1450,21 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
>  	return 0;
>  }
> 
> +static int ibmvnic_set_mac(struct net_device *netdev, void *p)
> +{
> +	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
> +	struct sockaddr *addr = p;
> +
> +	if (adapter->state != VNIC_OPEN) {
> +		memcpy(adapter->desired->mac, addr, sizeof(struct sockaddr));
> +		return 0;
> +	}
> +
> +	__ibmvnic_set_mac(netdev, addr);
> +
> +	return 0;
> +}
> +
>  /**
>   * do_reset returns zero if we are able to keep processing reset events, or
>   * non-zero if we hit a fatal error and must halt.
> @@ -1470,6 +1491,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
>  	if (rc)
>  		return rc;
> 
> +	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
> +	    adapter->wait_for_reset) {
> +		release_resources(adapter);
> +		release_sub_crqs(adapter);
> +		release_crq_queue(adapter);
> +	}
> +
>  	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
>  		/* remove the closed state so when we call open it appears
>  		 * we are coming from the probed state.
> @@ -1492,16 +1520,23 @@ static int do_reset(struct ibmvnic_adapter *adapter,
>  			return 0;
>  		}
> 
> -		rc = reset_tx_pools(adapter);
> -		if (rc)
> -			return rc;
> +		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
> +		    adapter->wait_for_reset) {
> +			rc = init_resources(adapter);
> +			if (rc)
> +				return rc;
> +		} else {
> +			rc = reset_tx_pools(adapter);
> +			if (rc)
> +				return rc;
> 
> -		rc = reset_rx_pools(adapter);
> -		if (rc)
> -			return rc;
> +			rc = reset_rx_pools(adapter);
> +			if (rc)
> +				return rc;
> 
> -		if (reset_state == VNIC_CLOSED)
> -			return 0;
> +			if (reset_state == VNIC_CLOSED)
> +				return 0;
> +		}
>  	}
> 
>  	rc = __ibmvnic_open(netdev);
> @@ -1580,6 +1615,12 @@ static void __ibmvnic_reset(struct work_struct *work)
>  		rwi = get_next_rwi(adapter);
>  	}
> 
> +	if (adapter->wait_for_reset) {
> +		adapter->wait_for_reset = false;
> +		adapter->reset_done_rc = rc;
> +		complete(&adapter->reset_done);
> +	}
> +
>  	if (rc) {
>  		netdev_dbg(adapter->netdev, "Reset failed\n");
>  		free_all_rwi(adapter);
> @@ -1759,9 +1800,42 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
>  }
>  #endif
> 
> +static int wait_for_reset(struct ibmvnic_adapter *adapter)
> +{
> +	adapter->fallback->mtu = adapter->req_mtu;
> +	adapter->fallback->rx_queues = adapter->req_rx_queues;
> +	adapter->fallback->tx_queues = adapter->req_tx_queues;
> +	adapter->fallback->rx_entries = adapter->req_rx_add_entries_per_subcrq;
> +	adapter->fallback->tx_entries = adapter->req_tx_entries_per_subcrq;
> +
> +	init_completion(&adapter->reset_done);
> +	adapter->wait_for_reset = true;
> +	ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
> +	wait_for_completion(&adapter->reset_done);
> +
> +	if (adapter->reset_done_rc) {
> +		adapter->desired->mtu = adapter->fallback->mtu;
> +		adapter->desired->rx_queues = adapter->fallback->rx_queues;
> +		adapter->desired->tx_queues = adapter->fallback->tx_queues;
> +		adapter->desired->rx_entries = adapter->fallback->rx_entries;
> +		adapter->desired->tx_entries = adapter->fallback->tx_entries;
> +
> +		init_completion(&adapter->reset_done);
> +		adapter->wait_for_reset = true;
> +		ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
> +		wait_for_completion(&adapter->reset_done);
> +	}
> +	adapter->wait_for_reset = false;
> +
> +	return adapter->reset_done_rc;
> +}
> +
>  static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
>  {
> -	return -EOPNOTSUPP;
> +	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
> +
> +	adapter->desired->mtu = new_mtu + ETH_HLEN;

Not really relevant to this patch but we do this calculation in many places,
perhaps it's time for a macro.

> +
> +	return wait_for_reset(adapter);
>  }
> 
>  static const struct net_device_ops ibmvnic_netdev_ops = {
> @@ -1849,6 +1923,27 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
>  	ring->rx_jumbo_pending = 0;
>  }
> 
> +static int ibmvnic_set_ringparam(struct net_device *netdev,
> +				 struct ethtool_ringparam *ring)
> +{
> +	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
> +
> +	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
> +	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
> +		netdev_err(netdev, "Invalid request.\n");
> +		netdev_err(netdev, "Max tx buffers = %llu\n",
> +			   adapter->max_tx_entries_per_subcrq);
> +		netdev_err(netdev, "Max rx buffers = %llu\n",
> +			   adapter->max_rx_add_entries_per_subcrq);
> +		return -EINVAL;
> +	}
> +
> +	adapter->desired->rx_entries = ring->rx_pending;
> +	adapter->desired->tx_entries = ring->tx_pending;
> +
> +	return wait_for_reset(adapter);
> +}
> +
>  static void ibmvnic_get_channels(struct net_device *netdev,
>  				 struct ethtool_channels *channels)
>  {
> @@ -1864,6 +1959,17 @@ static void ibmvnic_get_channels(struct net_device *netdev,
>  	channels->combined_count = 0;
>  }
> 
> +static int ibmvnic_set_channels(struct net_device *netdev,
> +				struct ethtool_channels *channels)
> +{
> +	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
> +
> +	adapter->desired->rx_queues = channels->rx_count;
> +	adapter->desired->tx_queues = channels->tx_count;
> +
> +	return wait_for_reset(adapter);
> +}
> +
>  static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
>  {
>  	struct ibmvnic_adapter *adapter = netdev_priv(dev);
> @@ -1960,7 +2066,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
>  	.set_msglevel		= ibmvnic_set_msglevel,
>  	.get_link		= ibmvnic_get_link,
>  	.get_ringparam		= ibmvnic_get_ringparam,
> +	.set_ringparam		= ibmvnic_set_ringparam,
>  	.get_channels		= ibmvnic_get_channels,
> +	.set_channels		= ibmvnic_set_channels,
>  	.get_strings            = ibmvnic_get_strings,
>  	.get_sset_count         = ibmvnic_get_sset_count,
>  	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
> @@ -2426,6 +2534,7 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
>  {
>  	struct device *dev = &adapter->vdev->dev;
>  	union ibmvnic_crq crq;
> +	int max_entries;
> 
>  	if (!retry) {
>  		/* Sub-CRQ entries are 32 byte long */
> @@ -2437,21 +2546,60 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
>  			return;
>  		}
> 
> -		/* Get the minimum between the queried max and the entries
> -		 * that fit in our PAGE_SIZE
> -		 */
> -		adapter->req_tx_entries_per_subcrq =
> -		    adapter->max_tx_entries_per_subcrq > entries_page ?
> -		    entries_page : adapter->max_tx_entries_per_subcrq;
> -		adapter->req_rx_add_entries_per_subcrq =
> -		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
> -		    entries_page : adapter->max_rx_add_entries_per_subcrq;
> -
> -		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
> -		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
> -		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
> +		if (adapter->desired->mtu)
> +			adapter->req_mtu = adapter->desired->mtu;
> +		else
> +			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
> +
> +		if (!adapter->desired->tx_entries)
> +			adapter->desired->tx_entries =
> +					adapter->max_tx_entries_per_subcrq;
> +		if (!adapter->desired->rx_entries)
> +			adapter->desired->rx_entries =
> +					adapter->max_rx_add_entries_per_subcrq;
> +
> +		max_entries = IBMVNIC_MAX_LTB_SIZE /
> +			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
> +
> +		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
> +			adapter->desired->tx_entries > IBMVNIC_MAX_LTB_SIZE) {
> +			adapter->desired->tx_entries = max_entries;
> +		}
> +
> +		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
> +			adapter->desired->rx_entries > IBMVNIC_MAX_LTB_SIZE) {
> +			adapter->desired->rx_entries = max_entries;
> +		}
> 
> -		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
> +		if (adapter->desired->tx_entries)
> +			adapter->req_tx_entries_per_subcrq =
> +					adapter->desired->tx_entries;
> +		else
> +			adapter->req_tx_entries_per_subcrq =
> +					adapter->max_tx_entries_per_subcrq;
> +
> +		if (adapter->desired->rx_entries)
> +			adapter->req_rx_add_entries_per_subcrq =
> +					adapter->desired->rx_entries;
> +		else
> +			adapter->req_rx_add_entries_per_subcrq =
> +					adapter->max_rx_add_entries_per_subcrq;
> +
> +		if (adapter->desired->tx_queues)
> +			adapter->req_tx_queues =
> +					adapter->desired->tx_queues;
> +		else
> +			adapter->req_tx_queues =
> +					adapter->opt_tx_comp_sub_queues;
> +
> +		if (adapter->desired->rx_queues)
> +			adapter->req_rx_queues =
> +					adapter->desired->rx_queues;
> +		else
> +			adapter->req_rx_queues =
> +					adapter->opt_rx_comp_queues;
> +
> +		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
>  	}
> 
>  	memset(&crq, 0, sizeof(crq));
> @@ -3272,6 +3420,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>  			    struct ibmvnic_adapter *adapter)
>  {
>  	struct device *dev = &adapter->vdev->dev;
> +	struct net_device *netdev = adapter->netdev;
>  	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
>  	struct ibmvnic_login_buffer *login = adapter->login_buf;
>  	int i;
> @@ -3291,6 +3440,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>  		return 0;
>  	}
> 
> +	netdev->mtu = adapter->req_mtu - ETH_HLEN;
> +
>  	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
>  	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
>  		netdev_dbg(adapter->netdev, "%016lx\n",
> @@ -3846,7 +3997,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
>  	unsigned long timeout = msecs_to_jiffies(30000);
>  	int rc;
> 
> -	if (adapter->resetting) {
> +	if (adapter->resetting && !adapter->wait_for_reset) {
>  		rc = ibmvnic_reset_crq(adapter);
>  		if (!rc)
>  			rc = vio_enable_interrupts(adapter->vdev);
> @@ -3880,7 +4031,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
>  		return -1;
>  	}
> 
> -	if (adapter->resetting)
> +	if (adapter->resetting && !adapter->wait_for_reset)
>  		rc = reset_sub_crq_queues(adapter);
>  	else
>  		rc = init_sub_crqs(adapter);
> @@ -3931,6 +4082,11 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
>  	adapter->vdev = dev;
>  	adapter->netdev = netdev;
> 
> +	adapter->desired = kzalloc(sizeof(*adapter->desired), GFP_KERNEL);
> +	adapter->desired->mac = kzalloc(sizeof(*adapter->desired->mac),
> +					GFP_KERNEL);
> +	adapter->fallback = kzalloc(sizeof(*adapter->fallback), GFP_KERNEL);
> +

If we are going to allocate the structs always, would it be better to just
have them in the adapter struct instead of allocating them?

-Nathan

>  	ether_addr_copy(adapter->mac_addr, mac_addr_p);
>  	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
>  	netdev->irq = dev->irq;
> @@ -3956,6 +4112,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
>  	} while (rc == EAGAIN);
> 
>  	netdev->mtu = adapter->req_mtu - ETH_HLEN;
> +	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
> +	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
> 
>  	rc = device_create_file(&dev->dev, &dev_attr_failover);
>  	if (rc)
> @@ -3970,6 +4128,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
>  	dev_info(&dev->dev, "ibmvnic registered\n");
> 
>  	adapter->state = VNIC_PROBED;
> +
> +	adapter->wait_for_reset = false;
> +
>  	return 0;
> 
>  ibmvnic_register_fail:
> @@ -3993,6 +4154,11 @@ static int ibmvnic_remove(struct vio_dev *dev)
>  	mutex_lock(&adapter->reset_lock);
> 
>  	release_resources(adapter);
> +
> +	kfree(adapter->desired->mac);
> +	kfree(adapter->desired);
> +	kfree(adapter->fallback);
> +
>  	release_sub_crqs(adapter);
>  	release_crq_queue(adapter);
> 
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
> index 7aa347a..8de998a 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.h
> +++ b/drivers/net/ethernet/ibm/ibmvnic.h
> @@ -42,6 +42,9 @@
>  #define IBMVNIC_TSO_BUF_SZ	65536
>  #define IBMVNIC_TSO_BUFS	64
> 
> +#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
> +#define IBMVNIC_BUFFER_HLEN 500
> +
>  struct ibmvnic_login_buffer {
>  	__be32 len;
>  	__be32 version;
> @@ -945,13 +948,23 @@ enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
>  			   VNIC_RESET_MOBILITY,
>  			   VNIC_RESET_FATAL,
>  			   VNIC_RESET_NON_FATAL,
> -			   VNIC_RESET_TIMEOUT};
> +			   VNIC_RESET_TIMEOUT,
> +			   VNIC_RESET_CHANGE_PARAM};
> 
>  struct ibmvnic_rwi {
>  	enum ibmvnic_reset_reason reset_reason;
>  	struct list_head list;
>  };
> 
> +struct ibmvnic_tunables {
> +	u64 rx_queues;
> +	u64 tx_queues;
> +	u64 rx_entries;
> +	u64 tx_entries;
> +	u64 mtu;
> +	struct sockaddr *mac;
> +};
> +
>  struct ibmvnic_adapter {
>  	struct vio_dev *vdev;
>  	struct net_device *netdev;
> @@ -1012,6 +1025,10 @@ struct ibmvnic_adapter {
>  	struct completion fw_done;
>  	int fw_done_rc;
> 
> +	struct completion reset_done;
> +	int reset_done_rc;
> +	bool wait_for_reset;
> +
>  	/* partner capabilities */
>  	u64 min_tx_queues;
>  	u64 min_rx_queues;
> @@ -1056,4 +1073,7 @@ struct ibmvnic_adapter {
>  	struct work_struct ibmvnic_reset;
>  	bool resetting;
>  	bool napi_enabled, from_passive_init;
> +
> +	struct ibmvnic_tunables *desired;
> +	struct ibmvnic_tunables *fallback;
>  };
>
diff mbox series

Patch

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 11eba82..c2c4a5b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -115,6 +115,7 @@  static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
+static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);

 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -926,6 +927,11 @@  static int ibmvnic_open(struct net_device *netdev)

 	mutex_lock(&adapter->reset_lock);

+	if (adapter->desired->mac) {
+		__ibmvnic_set_mac(netdev, adapter->desired->mac);
+		adapter->desired->mac = NULL;
+	}
+
 	if (adapter->state != VNIC_CLOSED) {
 		rc = ibmvnic_login(netdev);
 		if (rc) {
@@ -1426,7 +1432,7 @@  static void ibmvnic_set_multi(struct net_device *netdev)
 	}
 }

-static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
@@ -1444,6 +1450,21 @@  static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }

+static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (adapter->state != VNIC_OPEN) {
+		memcpy(adapter->desired->mac, addr, sizeof(struct sockaddr));
+		return 0;
+	}
+
+	__ibmvnic_set_mac(netdev, addr);
+
+	return 0;
+}
+
 /**
  * do_reset returns zero if we are able to keep processing reset events, or
  * non-zero if we hit a fatal error and must halt.
@@ -1470,6 +1491,13 @@  static int do_reset(struct ibmvnic_adapter *adapter,
 	if (rc)
 		return rc;

+	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
+	    adapter->wait_for_reset) {
+		release_resources(adapter);
+		release_sub_crqs(adapter);
+		release_crq_queue(adapter);
+	}
+
 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
 		/* remove the closed state so when we call open it appears
 		 * we are coming from the probed state.
@@ -1492,16 +1520,23 @@  static int do_reset(struct ibmvnic_adapter *adapter,
 			return 0;
 		}

-		rc = reset_tx_pools(adapter);
-		if (rc)
-			return rc;
+		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
+		    adapter->wait_for_reset) {
+			rc = init_resources(adapter);
+			if (rc)
+				return rc;
+		} else {
+			rc = reset_tx_pools(adapter);
+			if (rc)
+				return rc;

-		rc = reset_rx_pools(adapter);
-		if (rc)
-			return rc;
+			rc = reset_rx_pools(adapter);
+			if (rc)
+				return rc;

-		if (reset_state == VNIC_CLOSED)
-			return 0;
+			if (reset_state == VNIC_CLOSED)
+				return 0;
+		}
 	}

 	rc = __ibmvnic_open(netdev);
@@ -1580,6 +1615,12 @@  static void __ibmvnic_reset(struct work_struct *work)
 		rwi = get_next_rwi(adapter);
 	}

+	if (adapter->wait_for_reset) {
+		adapter->wait_for_reset = false;
+		adapter->reset_done_rc = rc;
+		complete(&adapter->reset_done);
+	}
+
 	if (rc) {
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
@@ -1759,9 +1800,42 @@  static void ibmvnic_netpoll_controller(struct net_device *dev)
 }
 #endif

+static int wait_for_reset(struct ibmvnic_adapter *adapter)
+{
+	adapter->fallback->mtu = adapter->req_mtu;
+	adapter->fallback->rx_queues = adapter->req_rx_queues;
+	adapter->fallback->tx_queues = adapter->req_tx_queues;
+	adapter->fallback->rx_entries = adapter->req_rx_add_entries_per_subcrq;
+	adapter->fallback->tx_entries = adapter->req_tx_entries_per_subcrq;
+
+	init_completion(&adapter->reset_done);
+	adapter->wait_for_reset = true;
+	ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+	wait_for_completion(&adapter->reset_done);
+
+	if (adapter->reset_done_rc) {
+		adapter->desired->mtu = adapter->fallback->mtu;
+		adapter->desired->rx_queues = adapter->fallback->rx_queues;
+		adapter->desired->tx_queues = adapter->fallback->tx_queues;
+		adapter->desired->rx_entries = adapter->fallback->rx_entries;
+		adapter->desired->tx_entries = adapter->fallback->tx_entries;
+
+		init_completion(&adapter->reset_done);
+		adapter->wait_for_reset = true;
+		ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+		wait_for_completion(&adapter->reset_done);
+	}
+	adapter->wait_for_reset = false;
+
+	return adapter->reset_done_rc;
+}
+
 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
 {
-	return -EOPNOTSUPP;
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+	adapter->desired->mtu = new_mtu + ETH_HLEN;
+
+	return wait_for_reset(adapter);
 }

 static const struct net_device_ops ibmvnic_netdev_ops = {
@@ -1849,6 +1923,27 @@  static void ibmvnic_get_ringparam(struct net_device *netdev,
 	ring->rx_jumbo_pending = 0;
 }

+static int ibmvnic_set_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
+	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+		netdev_err(netdev, "Invalid request.\n");
+		netdev_err(netdev, "Max tx buffers = %llu\n",
+			   adapter->max_tx_entries_per_subcrq);
+		netdev_err(netdev, "Max rx buffers = %llu\n",
+			   adapter->max_rx_add_entries_per_subcrq);
+		return -EINVAL;
+	}
+
+	adapter->desired->rx_entries = ring->rx_pending;
+	adapter->desired->tx_entries = ring->tx_pending;
+
+	return wait_for_reset(adapter);
+}
+
 static void ibmvnic_get_channels(struct net_device *netdev,
 				 struct ethtool_channels *channels)
 {
@@ -1864,6 +1959,17 @@  static void ibmvnic_get_channels(struct net_device *netdev,
 	channels->combined_count = 0;
 }

+static int ibmvnic_set_channels(struct net_device *netdev,
+				struct ethtool_channels *channels)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+	adapter->desired->rx_queues = channels->rx_count;
+	adapter->desired->tx_queues = channels->tx_count;
+
+	return wait_for_reset(adapter);
+}
+
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -1960,7 +2066,9 @@  static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 	.set_msglevel		= ibmvnic_set_msglevel,
 	.get_link		= ibmvnic_get_link,
 	.get_ringparam		= ibmvnic_get_ringparam,
+	.set_ringparam		= ibmvnic_set_ringparam,
 	.get_channels		= ibmvnic_get_channels,
+	.set_channels		= ibmvnic_set_channels,
 	.get_strings            = ibmvnic_get_strings,
 	.get_sset_count         = ibmvnic_get_sset_count,
 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
@@ -2426,6 +2534,7 @@  static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
 {
 	struct device *dev = &adapter->vdev->dev;
 	union ibmvnic_crq crq;
+	int max_entries;

 	if (!retry) {
 		/* Sub-CRQ entries are 32 byte long */
@@ -2437,21 +2546,60 @@  static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
 			return;
 		}

-		/* Get the minimum between the queried max and the entries
-		 * that fit in our PAGE_SIZE
-		 */
-		adapter->req_tx_entries_per_subcrq =
-		    adapter->max_tx_entries_per_subcrq > entries_page ?
-		    entries_page : adapter->max_tx_entries_per_subcrq;
-		adapter->req_rx_add_entries_per_subcrq =
-		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
-		    entries_page : adapter->max_rx_add_entries_per_subcrq;
-
-		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
-		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
-		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+		if (adapter->desired->mtu)
+			adapter->req_mtu = adapter->desired->mtu;
+		else
+			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
+
+		if (!adapter->desired->tx_entries)
+			adapter->desired->tx_entries =
+					adapter->max_tx_entries_per_subcrq;
+		if (!adapter->desired->rx_entries)
+			adapter->desired->rx_entries =
+					adapter->max_rx_add_entries_per_subcrq;
+
+		max_entries = IBMVNIC_MAX_LTB_SIZE /
+			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
+
+		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
+			adapter->desired->tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+			adapter->desired->tx_entries = max_entries;
+		}
+
+		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
+			adapter->desired->rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+			adapter->desired->rx_entries = max_entries;
+		}

-		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
+		if (adapter->desired->tx_entries)
+			adapter->req_tx_entries_per_subcrq =
+					adapter->desired->tx_entries;
+		else
+			adapter->req_tx_entries_per_subcrq =
+					adapter->max_tx_entries_per_subcrq;
+
+		if (adapter->desired->rx_entries)
+			adapter->req_rx_add_entries_per_subcrq =
+					adapter->desired->rx_entries;
+		else
+			adapter->req_rx_add_entries_per_subcrq =
+					adapter->max_rx_add_entries_per_subcrq;
+
+		if (adapter->desired->tx_queues)
+			adapter->req_tx_queues =
+					adapter->desired->tx_queues;
+		else
+			adapter->req_tx_queues =
+					adapter->opt_tx_comp_sub_queues;
+
+		if (adapter->desired->rx_queues)
+			adapter->req_rx_queues =
+					adapter->desired->rx_queues;
+		else
+			adapter->req_rx_queues =
+					adapter->opt_rx_comp_queues;
+
+		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 	}

 	memset(&crq, 0, sizeof(crq));
@@ -3272,6 +3420,7 @@  static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 			    struct ibmvnic_adapter *adapter)
 {
 	struct device *dev = &adapter->vdev->dev;
+	struct net_device *netdev = adapter->netdev;
 	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
 	struct ibmvnic_login_buffer *login = adapter->login_buf;
 	int i;
@@ -3291,6 +3440,8 @@  static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 		return 0;
 	}

+	netdev->mtu = adapter->req_mtu - ETH_HLEN;
+
 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
 		netdev_dbg(adapter->netdev, "%016lx\n",
@@ -3846,7 +3997,7 @@  static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 	unsigned long timeout = msecs_to_jiffies(30000);
 	int rc;

-	if (adapter->resetting) {
+	if (adapter->resetting && !adapter->wait_for_reset) {
 		rc = ibmvnic_reset_crq(adapter);
 		if (!rc)
 			rc = vio_enable_interrupts(adapter->vdev);
@@ -3880,7 +4031,7 @@  static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 		return -1;
 	}

-	if (adapter->resetting)
+	if (adapter->resetting && !adapter->wait_for_reset)
 		rc = reset_sub_crq_queues(adapter);
 	else
 		rc = init_sub_crqs(adapter);
@@ -3931,6 +4082,11 @@  static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	adapter->vdev = dev;
 	adapter->netdev = netdev;

+	adapter->desired = kzalloc(sizeof(*adapter->desired), GFP_KERNEL);
+	adapter->desired->mac = kzalloc(sizeof(*adapter->desired->mac),
+					GFP_KERNEL);
+	adapter->fallback = kzalloc(sizeof(*adapter->fallback), GFP_KERNEL);
+
 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
 	netdev->irq = dev->irq;
@@ -3956,6 +4112,8 @@  static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	} while (rc == EAGAIN);

 	netdev->mtu = adapter->req_mtu - ETH_HLEN;
+	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

 	rc = device_create_file(&dev->dev, &dev_attr_failover);
 	if (rc)
@@ -3970,6 +4128,9 @@  static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	dev_info(&dev->dev, "ibmvnic registered\n");

 	adapter->state = VNIC_PROBED;
+
+	adapter->wait_for_reset = false;
+
 	return 0;

 ibmvnic_register_fail:
@@ -3993,6 +4154,11 @@  static int ibmvnic_remove(struct vio_dev *dev)
 	mutex_lock(&adapter->reset_lock);

 	release_resources(adapter);
+
+	kfree(adapter->desired->mac);
+	kfree(adapter->desired);
+	kfree(adapter->fallback);
+
 	release_sub_crqs(adapter);
 	release_crq_queue(adapter);

diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 7aa347a..8de998a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -42,6 +42,9 @@ 
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64

+#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
+#define IBMVNIC_BUFFER_HLEN 500
+
 struct ibmvnic_login_buffer {
 	__be32 len;
 	__be32 version;
@@ -945,13 +948,23 @@  enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
 			   VNIC_RESET_MOBILITY,
 			   VNIC_RESET_FATAL,
 			   VNIC_RESET_NON_FATAL,
-			   VNIC_RESET_TIMEOUT};
+			   VNIC_RESET_TIMEOUT,
+			   VNIC_RESET_CHANGE_PARAM};

 struct ibmvnic_rwi {
 	enum ibmvnic_reset_reason reset_reason;
 	struct list_head list;
 };

+struct ibmvnic_tunables {
+	u64 rx_queues;
+	u64 tx_queues;
+	u64 rx_entries;
+	u64 tx_entries;
+	u64 mtu;
+	struct sockaddr *mac;
+};
+
 struct ibmvnic_adapter {
 	struct vio_dev *vdev;
 	struct net_device *netdev;
@@ -1012,6 +1025,10 @@  struct ibmvnic_adapter {
 	struct completion fw_done;
 	int fw_done_rc;

+	struct completion reset_done;
+	int reset_done_rc;
+	bool wait_for_reset;
+
 	/* partner capabilities */
 	u64 min_tx_queues;
 	u64 min_rx_queues;
@@ -1056,4 +1073,7 @@  struct ibmvnic_adapter {
 	struct work_struct ibmvnic_reset;
 	bool resetting;
 	bool napi_enabled, from_passive_init;
+
+	struct ibmvnic_tunables *desired;
+	struct ibmvnic_tunables *fallback;
 };