From patchwork Thu Apr 7 18:39:46 2016
X-Patchwork-Id: 607627
From: Jakub Kicinski
To: netdev@vger.kernel.org
Cc: Jakub Kicinski
Subject: [PATCH v5 net-next 13/15] nfp: convert .ndo_change_mtu() to prepare/commit paradigm
Date: Thu, 7 Apr 2016 19:39:46 +0100
Message-Id: <1460054388-471-14-git-send-email-jakub.kicinski@netronome.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1460054388-471-1-git-send-email-jakub.kicinski@netronome.com>
References: <1460054388-471-1-git-send-email-jakub.kicinski@netronome.com>

When changing the MTU on a running device, first allocate the new rings
and buffers, and proceed with changing the MTU only once that
allocation succeeds.
Allocating new rings is not really necessary for this operation - it is
done to keep the code simple, and because the size of the extra ring
memory is quite small compared to the size of the buffers.

The operation can still fail midway through if FW communication times
out.  In that case we retry with the old MTU (and the old rings).

Signed-off-by: Jakub Kicinski
---
 .../net/ethernet/netronome/nfp/nfp_net_common.c    | 108 +++++++++++++++++++--
 1 file changed, 102 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 03c60f755de0..e7c420fdcb0d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1506,6 +1506,64 @@ err_alloc:
 	return -ENOMEM;
 }
 
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz)
+{
+	struct nfp_net_rx_ring *rings;
+	unsigned int r;
+
+	rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
+		return NULL;
+
+	for (r = 0; r < nn->num_rx_rings; r++) {
+		nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
+
+		if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz))
+			goto err_free_prev;
+
+		if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+			goto err_free_ring;
+	}
+
+	return rings;
+
+err_free_prev:
+	while (r--) {
+		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+err_free_ring:
+		nfp_net_rx_ring_free(&rings[r]);
+	}
+	kfree(rings);
+	return NULL;
+}
+
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+{
+	struct nfp_net_rx_ring *old = nn->rx_rings;
+	unsigned int r;
+
+	for (r = 0; r < nn->num_rx_rings; r++)
+		old[r].r_vec->rx_ring = &rings[r];
+
+	nn->rx_rings = rings;
+	return old;
+}
+
+static void
+nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+{
+	unsigned int r;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+		nfp_net_rx_ring_free(&rings[r]);
+	}
+
+	kfree(rings);
+}
+
 static int
 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 		       int idx)
@@ -1984,23 +2042,61 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
 
 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
+	unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
 	struct nfp_net *nn = netdev_priv(netdev);
+	struct nfp_net_rx_ring *tmp_rings;
+	int err;
 
 	if (new_mtu < 68 || new_mtu > nn->max_mtu) {
 		nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
 		return -EINVAL;
 	}
 
+	old_mtu = netdev->mtu;
+	old_fl_bufsz = nn->fl_bufsz;
+	new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+
+	if (!netif_running(netdev)) {
+		netdev->mtu = new_mtu;
+		nn->fl_bufsz = new_fl_bufsz;
+		return 0;
+	}
+
+	/* Prepare new rings */
+	tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz);
+	if (!tmp_rings)
+		return -ENOMEM;
+
+	/* Stop device, swap in new rings, try to start the firmware */
+	nfp_net_close_stack(nn);
+	nfp_net_clear_config_and_disable(nn);
+
+	tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
 	netdev->mtu = new_mtu;
-	nn->fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+	nn->fl_bufsz = new_fl_bufsz;
+
+	err = nfp_net_set_config_and_enable(nn);
+	if (err) {
+		const int err_new = err;
+
+		/* Try with old configuration and old rings */
+		tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
+		netdev->mtu = old_mtu;
+		nn->fl_bufsz = old_fl_bufsz;
 
-	/* restart if running */
-	if (netif_running(netdev)) {
-		nfp_net_netdev_close(netdev);
-		nfp_net_netdev_open(netdev);
+		err = __nfp_net_set_config_and_enable(nn);
+		if (err)
+			nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
+			       err_new, err);
 	}
 
-	return 0;
+	nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+
+	nfp_net_open_stack(nn);
+
+	return err;
 }
 
 static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
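
The change_mtu() flow above is an instance of a general prepare/commit
pattern: allocate everything the new configuration needs up front, swap
it in while the device is stopped, and fall back to the old
configuration if the commit step fails.  Below is a minimal, runnable
userspace C sketch of that pattern; this is not driver code - the
resource/resource_prepare/apply_config names are illustrative stand-ins,
and apply_config() models the FW reconfiguration step that may still
fail after the swap.

#include <stdio.h>
#include <stdlib.h>

struct resource {
	size_t bufsz;
	void *buf;
};

/* Prepare: allocate everything the new config needs, touch nothing else. */
static int resource_prepare(struct resource *res, size_t bufsz)
{
	res->buf = malloc(bufsz);
	if (!res->buf)
		return -1;
	res->bufsz = bufsz;
	return 0;
}

static void resource_free(struct resource *res)
{
	free(res->buf);
}

/* Stand-in for the commit step that may still fail (the FW handshake). */
static int apply_config(const struct resource *res)
{
	return res->bufsz ? 0 : -1;
}

int main(void)
{
	struct resource active, shadow, old;

	if (resource_prepare(&active, 1500) || apply_config(&active))
		return 1;

	/* Prepare the new resources before touching the active ones. */
	if (resource_prepare(&shadow, 9000)) {
		/* Nothing was changed; the active config is intact. */
		resource_free(&active);
		return 1;
	}

	/* Swap: shadow becomes active, old is kept around for rollback. */
	old = active;
	active = shadow;

	if (apply_config(&active)) {
		/* Commit failed: swap back and re-apply the old config. */
		shadow = active;
		active = old;
		old = shadow;
		apply_config(&active);	/* the driver logs if even this fails */
	}

	/* Whichever configuration lost the swap gets freed. */
	resource_free(&old);

	printf("active bufsz: %zu\n", active.bufsz);
	resource_free(&active);
	return 0;
}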
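
The error unwinding in nfp_net_shadow_rx_rings_prepare() uses a less
common idiom: a label placed inside the cleanup loop, so one loop can
free both the fully initialized elements (buffers and ring) and the one
element whose buffer allocation failed (ring only).  A standalone sketch
of just that idiom follows; the item/ring/bufs names are hypothetical
stand-ins for the driver's ring and buffer pairs.

#include <stdlib.h>

struct item {
	void *ring;
	void *bufs;
};

static struct item *items_prepare(unsigned int n)
{
	struct item *items;
	unsigned int i;

	items = calloc(n, sizeof(*items));
	if (!items)
		return NULL;

	for (i = 0; i < n; i++) {
		items[i].ring = malloc(64);	/* ring allocation */
		if (!items[i].ring)
			goto err_free_prev;

		items[i].bufs = malloc(2048);	/* buffer allocation */
		if (!items[i].bufs)
			goto err_free_ring;
	}

	return items;

err_free_prev:
	/* Entered with i at the element whose ring alloc failed;
	 * while (i--) unwinds all earlier, fully set up elements. */
	while (i--) {
		free(items[i].bufs);
err_free_ring:
		/* Jumping here frees only the ring of element i, then
		 * the loop condition takes over and unwinds the rest. */
		free(items[i].ring);
	}
	free(items);
	return NULL;
}

int main(void)
{
	struct item *items = items_prepare(4);
	unsigned int i;

	if (!items)
		return 1;

	for (i = 0; i < 4; i++) {
		free(items[i].bufs);
		free(items[i].ring);
	}
	free(items);
	return 0;
}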