From patchwork Tue May 22 07:35:02 2018
From: Björn Töpel
To: magnus.karlsson@intel.com, magnus.karlsson@gmail.com, ast@fb.com,
    daniel@iogearbox.net, netdev@vger.kernel.org
Cc: Björn Töpel
Subject: [PATCH bpf-next 7/8] xsk: simplified umem setup
Date: Tue, 22 May 2018 09:35:02 +0200
Message-Id: <20180522073503.2199-8-bjorn.topel@gmail.com>
In-Reply-To: <20180522073503.2199-1-bjorn.topel@gmail.com>
References: <20180522073503.2199-1-bjorn.topel@gmail.com>

From: Björn Töpel

As suggested by Daniel Borkmann, the umem setup code was a bit too
defensive and complex. Here, we reduce the number of checks. Also, the
memory pinning is now folded into the umem creation, and the locking
is done correctly.
Signed-off-by: Björn Töpel
---
 net/xdp/xdp_umem.c | 79 ++++++++++++++++++++++++++----------------------------
 net/xdp/xdp_umem.h |  3 +--
 net/xdp/xsk.c      | 24 ++++++++---------
 3 files changed, 51 insertions(+), 55 deletions(-)

diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index c47909c74899..faa6ffbaf6ab 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -16,39 +16,25 @@
 
 #define XDP_UMEM_MIN_FRAME_SIZE 2048
 
-int xdp_umem_create(struct xdp_umem **umem)
-{
-	*umem = kzalloc(sizeof(**umem), GFP_KERNEL);
-
-	if (!*umem)
-		return -ENOMEM;
-
-	return 0;
-}
-
 static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
 	unsigned int i;
 
-	if (umem->pgs) {
-		for (i = 0; i < umem->npgs; i++) {
-			struct page *page = umem->pgs[i];
-
-			set_page_dirty_lock(page);
-			put_page(page);
-		}
+	for (i = 0; i < umem->npgs; i++) {
+		struct page *page = umem->pgs[i];
 
-		kfree(umem->pgs);
-		umem->pgs = NULL;
+		set_page_dirty_lock(page);
+		put_page(page);
 	}
+
+	kfree(umem->pgs);
+	umem->pgs = NULL;
 }
 
 static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 {
-	if (umem->user) {
-		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
-		free_uid(umem->user);
-	}
+	atomic_long_sub(umem->npgs, &umem->user->locked_vm);
+	free_uid(umem->user);
 }
 
 static void xdp_umem_release(struct xdp_umem *umem)
@@ -66,22 +52,18 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
-	if (umem->pgs) {
-		xdp_umem_unpin_pages(umem);
-
-		task = get_pid_task(umem->pid, PIDTYPE_PID);
-		put_pid(umem->pid);
-		if (!task)
-			goto out;
-		mm = get_task_mm(task);
-		put_task_struct(task);
-		if (!mm)
-			goto out;
+	xdp_umem_unpin_pages(umem);
 
-		mmput(mm);
-		umem->pgs = NULL;
-	}
+	task = get_pid_task(umem->pid, PIDTYPE_PID);
+	put_pid(umem->pid);
+	if (!task)
+		goto out;
+	mm = get_task_mm(task);
+	put_task_struct(task);
+	if (!mm)
+		goto out;
+	mmput(mm);
 
 	xdp_umem_unaccount_pages(umem);
 out:
 	kfree(umem);
@@ -167,16 +149,13 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
 	return 0;
 }
 
-int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 {
 	u32 frame_size = mr->frame_size, frame_headroom = mr->frame_headroom;
 	u64 addr = mr->addr, size = mr->len;
 	unsigned int nframes, nfpp;
 	int size_chk, err;
 
-	if (!umem)
-		return -EINVAL;
-
 	if (frame_size < XDP_UMEM_MIN_FRAME_SIZE || frame_size > PAGE_SIZE) {
 		/* Strictly speaking we could support this, if:
 		 * - huge pages, or*
@@ -245,6 +224,24 @@ int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	return err;
 }
 
+struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
+{
+	struct xdp_umem *umem;
+	int err;
+
+	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+	if (!umem)
+		return ERR_PTR(-ENOMEM);
+
+	err = xdp_umem_reg(umem, mr);
+	if (err) {
+		kfree(umem);
+		return ERR_PTR(err);
+	}
+
+	return umem;
+}
+
 bool xdp_umem_validate_queues(struct xdp_umem *umem)
 {
 	return umem->fq && umem->cq;
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
index 70fe225baa51..9802287ff19d 100644
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -50,9 +50,8 @@ static inline char *xdp_umem_get_data_with_headroom(struct xdp_umem *umem,
 }
 
 bool xdp_umem_validate_queues(struct xdp_umem *umem);
-int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr);
 void xdp_get_umem(struct xdp_umem *umem);
 void xdp_put_umem(struct xdp_umem *umem);
-int xdp_umem_create(struct xdp_umem **umem);
+struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);
 
 #endif /* XDP_UMEM_H_ */
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 01f010ec0c05..cce0e4f8a536 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -406,25 +406,23 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 		struct xdp_umem_reg mr;
 		struct xdp_umem *umem;
 
-		if (xs->umem)
-			return -EBUSY;
-
 		if (copy_from_user(&mr, optval, sizeof(mr)))
 			return -EFAULT;
 
 		mutex_lock(&xs->mutex);
-		err = xdp_umem_create(&umem);
+		if (xs->umem) {
+			mutex_unlock(&xs->mutex);
+			return -EBUSY;
+		}
 
-		err = xdp_umem_reg(umem, &mr);
-		if (err) {
-			kfree(umem);
+		umem = xdp_umem_create(&mr);
+		if (IS_ERR(umem)) {
 			mutex_unlock(&xs->mutex);
-			return err;
+			return PTR_ERR(umem);
 		}
 
 		/* Make sure umem is ready before it can be seen by others */
 		smp_wmb();
-
 		xs->umem = umem;
 		mutex_unlock(&xs->mutex);
 		return 0;
@@ -435,13 +433,15 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 		struct xsk_queue **q;
 		int entries;
 
-		if (!xs->umem)
-			return -EINVAL;
-
 		if (copy_from_user(&entries, optval, sizeof(entries)))
 			return -EFAULT;
 
 		mutex_lock(&xs->mutex);
+		if (!xs->umem) {
+			mutex_unlock(&xs->mutex);
+			return -EINVAL;
+		}
+
 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
 		    &xs->umem->cq;
 		err = xsk_init_queue(entries, q, true);
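
The two idioms the patch leans on — a constructor that returns either a
valid pointer or an ERR_PTR-encoded errno, and doing the -EBUSY/-EINVAL
state checks only after the mutex is held — can be sketched in standalone
userspace C. The sketch below is illustrative only: the toy_* names and
the err.h-style helpers are simplified stand-ins for the kernel's
<linux/err.h> and the real xdp_sock/xdp_umem structures, not part of the
patch. Compiles with: cc -pthread -o toy toy.c

/*
 * Standalone sketch (not kernel code) of the patterns adopted by this
 * patch: an ERR_PTR-returning constructor plus check-under-lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in a pointer, as <linux/err.h> does. */
static void *ERR_PTR(long error)
{
	return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct toy_umem {
	size_t len;		/* length of the registered region */
};

struct toy_sock {
	pthread_mutex_t mutex;
	struct toy_umem *umem;	/* NULL until registration succeeds */
};

/*
 * Allocation and validation folded into a single constructor, mirroring
 * the new xdp_umem_create(): the caller gets either a usable object or
 * a decoded error, never a half-initialized struct to clean up.
 */
static struct toy_umem *toy_umem_create(size_t len)
{
	struct toy_umem *umem;

	if (!len)
		return ERR_PTR(-EINVAL);

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->len = len;
	return umem;
}

/*
 * The -EBUSY check runs only after the mutex is held, so two racing
 * callers cannot both observe sk->umem == NULL and each install their
 * own umem. (The kernel code additionally issues smp_wmb() before the
 * publish; in this sketch the mutex alone provides the ordering.)
 */
static int toy_reg_umem(struct toy_sock *sk, size_t len)
{
	struct toy_umem *umem;
	int err = 0;

	pthread_mutex_lock(&sk->mutex);
	if (sk->umem) {
		err = -EBUSY;
		goto out_unlock;
	}

	umem = toy_umem_create(len);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		goto out_unlock;
	}

	sk->umem = umem;
out_unlock:
	pthread_mutex_unlock(&sk->mutex);
	return err;
}

int main(void)
{
	struct toy_sock sk = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	printf("first reg:  %d\n", toy_reg_umem(&sk, 4096)); /* 0 */
	printf("second reg: %d\n", toy_reg_umem(&sk, 4096)); /* -EBUSY */
	return 0;
}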