From patchwork Sat Jul 2 16:38:32 2011
X-Patchwork-Submitter: Shmulik Ravid
X-Patchwork-Id: 102993
X-Patchwork-Delegate: davem@davemloft.net
Subject: [net-next PATCH 2/2] dcbnl: Add CEE notification
From: "Shmulik Ravid"
To: davem@davemloft.net
Cc: "John Fastabend", netdev@vger.kernel.org
Organization: Broadcom
Date: Sat, 2 Jul 2011 19:38:32 +0300
Message-ID: <1309624712.15528.48.camel@lb-tlvb-shmulik.il.broadcom.com>
X-Mailing-List: netdev@vger.kernel.org

This patch adds an unsolicited notification of the DCBX negotiated
parameters for the CEE flavor of the DCBX protocol. The notification
message is identical to the aggregated CEE get operation and holds all
the pertinent local and peer information. The notification routine is
exported so it can be invoked by drivers supporting an embedded DCBX
stack.

Signed-off-by: Shmulik Ravid
---
 include/net/dcbnl.h |    5 +-
 net/dcb/dcbnl.c     |  409 ++++++++++++++++++++++++++++-----------------------
 2 files changed, 226 insertions(+), 188 deletions(-)

diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index d5bbb79..f5aa399 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -34,7 +34,10 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
 int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
 u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
 
-int dcbnl_notify(struct net_device *dev, int event, int cmd, u32 seq, u32 pid);
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+                      u32 seq, u32 pid);
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+                     u32 seq, u32 pid);
 
 /*
  * Ops struct for the netlink callbacks. Used by DCB-enabled drivers through
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5b75ed7..1937f92 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1310,8 +1310,193 @@ nla_put_failure:
         return err;
 }
 
-int dcbnl_notify(struct net_device *dev, int event, int cmd,
-                 u32 seq, u32 pid)
+static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
+                             int dir)
+{
+        u8 pgid, up_map, prio, tc_pct;
+        const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
+        int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
+        struct nlattr *pg = nla_nest_start(skb, i);
+        if (!pg)
+                goto nla_put_failure;
+
+        for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
+                struct nlattr *tc_nest = nla_nest_start(skb, i);
+                if (!tc_nest)
+                        goto nla_put_failure;
+
+                pgid = DCB_ATTR_VALUE_UNDEFINED;
+                prio = DCB_ATTR_VALUE_UNDEFINED;
+                tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+                up_map = DCB_ATTR_VALUE_UNDEFINED;
+
+                if (!dir)
+                        ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
+                                          &prio, &pgid, &tc_pct, &up_map);
+                else
+                        ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
+                                          &prio, &pgid, &tc_pct, &up_map);
+
+                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
+                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
+                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
+                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+                nla_nest_end(skb, tc_nest);
+        }
+
+        for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
+                tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+
+                if (!dir)
+                        ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
+                                           &tc_pct);
+                else
+                        ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
+                                           &tc_pct);
+                NLA_PUT_U8(skb, i, tc_pct);
+        }
+        nla_nest_end(skb, pg);
+        return 0;
+
+nla_put_failure:
+        return -EMSGSIZE;
+}
+
+static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+{
+        struct nlattr *cee, *app;
+        struct dcb_app_type *itr;
+        const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+        int dcbx, i, err = -EMSGSIZE;
+        u8 value;
+
+        NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+        cee = nla_nest_start(skb, DCB_ATTR_CEE);
+        if (!cee)
+                goto nla_put_failure;
+
+        /* local pg */
+        if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
+                err = dcbnl_cee_pg_fill(skb, netdev, 1);
+                if (err)
+                        goto nla_put_failure;
+        }
+
+        if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
+                err = dcbnl_cee_pg_fill(skb, netdev, 0);
+                if (err)
+                        goto nla_put_failure;
+        }
+
+        /* local pfc */
+        if (ops->getpfccfg) {
+                struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
+                if (!pfc_nest)
+                        goto nla_put_failure;
+
+                for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
+                        ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
+                        NLA_PUT_U8(skb, i, value);
+                }
+                nla_nest_end(skb, pfc_nest);
+        }
+
+        /* local app */
+        spin_lock(&dcb_lock);
+        app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
+        if (!app)
+                goto nla_put_failure;
+
+        list_for_each_entry(itr, &dcb_app_list, list) {
+                if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+                        struct nlattr *app_nest = nla_nest_start(skb,
+                                                                 DCB_ATTR_APP);
+                        if (!app_nest)
+                                goto dcb_unlock;
+
+                        err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
+                                         itr->app.selector);
+                        if (err)
+                                goto dcb_unlock;
+
+                        err = nla_put_u16(skb, DCB_APP_ATTR_ID,
+                                          itr->app.protocol);
+                        if (err)
+                                goto dcb_unlock;
+
+                        err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
+                                         itr->app.priority);
+                        if (err)
+                                goto dcb_unlock;
+
+                        nla_nest_end(skb, app_nest);
+                }
+        }
+        nla_nest_end(skb, app);
+
+        if (netdev->dcbnl_ops->getdcbx)
+                dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+        else
+                dcbx = -EOPNOTSUPP;
+
+        spin_unlock(&dcb_lock);
+
+        /* features flags */
+        if (ops->getfeatcfg) {
+                struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
+                if (!feat)
+                        goto nla_put_failure;
+
+                for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
+                     i++)
+                        if (!ops->getfeatcfg(netdev, i, &value))
+                                NLA_PUT_U8(skb, i, value);
+
+                nla_nest_end(skb, feat);
+        }
+
+        /* peer info if available */
+        if (ops->cee_peer_getpg) {
+                struct cee_pg pg;
+                err = ops->cee_peer_getpg(netdev, &pg);
+                if (!err)
+                        NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+        }
+
+        if (ops->cee_peer_getpfc) {
+                struct cee_pfc pfc;
+                err = ops->cee_peer_getpfc(netdev, &pfc);
+                if (!err)
+                        NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+        }
+
+        if (ops->peer_getappinfo && ops->peer_getapptable) {
+                err = dcbnl_build_peer_app(netdev, skb,
+                                           DCB_ATTR_CEE_PEER_APP_TABLE,
+                                           DCB_ATTR_CEE_PEER_APP_INFO,
+                                           DCB_ATTR_CEE_PEER_APP);
+                if (err)
+                        goto nla_put_failure;
+        }
+        nla_nest_end(skb, cee);
+
+        /* DCBX state */
+        if (dcbx >= 0) {
+                err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
+                if (err)
+                        goto nla_put_failure;
+        }
+        return 0;
+
+dcb_unlock:
+        spin_unlock(&dcb_lock);
+nla_put_failure:
+        return err;
+}
+
+static int dcbnl_notify(struct net_device *dev, int event, int cmd,
+                        u32 seq, u32 pid, int dcbx_ver)
 {
         struct net *net = dev_net(dev);
         struct sk_buff *skb;
@@ -1337,7 +1522,11 @@ int dcbnl_notify(struct net_device *dev, int event, int cmd,
         dcb->dcb_family = AF_UNSPEC;
         dcb->cmd = cmd;
 
-        err = dcbnl_ieee_fill(skb, dev);
+        if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
+                err = dcbnl_ieee_fill(skb, dev);
+        else
+                err = dcbnl_cee_fill(skb, dev);
+
         if (err < 0) {
                 /* Report error to broadcast listeners */
                 nlmsg_cancel(skb, nlh);
@@ -1351,7 +1540,20 @@ int dcbnl_notify(struct net_device *dev, int event, int cmd,
 
         return err;
 }
-EXPORT_SYMBOL(dcbnl_notify);
+
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+                      u32 seq, u32 pid)
+{
+        return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
+}
+EXPORT_SYMBOL(dcbnl_ieee_notify);
+
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+                     u32 seq, u32 pid)
+{
+        return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
+}
+EXPORT_SYMBOL(dcbnl_cee_notify);
 
 /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
  * be completed the entire msg is aborted and error value is returned.
@@ -1411,7 +1613,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
 err:
         dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
                     pid, seq, flags);
-        dcbnl_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
+        dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
         return err;
 }
 
@@ -1495,7 +1697,7 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
 err:
         dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
                     pid, seq, flags);
-        dcbnl_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
+        dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
         return err;
 }
 
@@ -1642,70 +1844,16 @@ err:
         return ret;
 }
 
-static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
-                             int dir)
-{
-        u8 pgid, up_map, prio, tc_pct;
-        const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
-        int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
-        struct nlattr *pg = nla_nest_start(skb, i);
-        if (!pg)
-                goto nla_put_failure;
-
-        for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
-                struct nlattr *tc_nest = nla_nest_start(skb, i);
-                if (!tc_nest)
-                        goto nla_put_failure;
-
-                pgid = DCB_ATTR_VALUE_UNDEFINED;
-                prio = DCB_ATTR_VALUE_UNDEFINED;
-                tc_pct = DCB_ATTR_VALUE_UNDEFINED;
-                up_map = DCB_ATTR_VALUE_UNDEFINED;
-
-                if (!dir)
-                        ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
-                                          &prio, &pgid, &tc_pct, &up_map);
-                else
-                        ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
-                                          &prio, &pgid, &tc_pct, &up_map);
-
-                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
-                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
-                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
-                NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
-                nla_nest_end(skb, tc_nest);
-        }
-
-        for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
-                tc_pct = DCB_ATTR_VALUE_UNDEFINED;
-
-                if (!dir)
-                        ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
-                                           &tc_pct);
-                else
-                        ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
-                                           &tc_pct);
-                NLA_PUT_U8(skb, i, tc_pct);
-        }
-        nla_nest_end(skb, pg);
-        return 0;
-
-nla_put_failure:
-        return -EMSGSIZE;
-}
-
 /* Handle CEE DCBX GET commands. */
 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
 {
+        struct net *net = dev_net(netdev);
         struct sk_buff *skb;
         struct nlmsghdr *nlh;
         struct dcbmsg *dcb;
-        struct nlattr *cee, *app;
-        struct dcb_app_type *itr;
         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
-        int dcbx, i, err = -EMSGSIZE;
-        u8 value;
+        int err;
 
         if (!ops)
                 return -EOPNOTSUPP;
@@ -1714,138 +1862,25 @@ static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
         if (!skb)
                 return -ENOBUFS;
 
-        nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+        nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+        if (nlh == NULL) {
+                nlmsg_free(skb);
+                return -EMSGSIZE;
+        }
 
         dcb = NLMSG_DATA(nlh);
         dcb->dcb_family = AF_UNSPEC;
         dcb->cmd = DCB_CMD_CEE_GET;
 
-        NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
-        cee = nla_nest_start(skb, DCB_ATTR_CEE);
-        if (!cee)
-                goto nla_put_failure;
-
-        /* local pg */
-        if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
-                err = dcbnl_cee_pg_fill(skb, netdev, 1);
-                if (err)
-                        goto nla_put_failure;
-        }
-
-        if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
-                err = dcbnl_cee_pg_fill(skb, netdev, 0);
-                if (err)
-                        goto nla_put_failure;
-        }
-
-        /* local pfc */
-        if (ops->getpfccfg) {
-                struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
-                if (!pfc_nest)
-                        goto nla_put_failure;
-
-                for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
-                        ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
-                        NLA_PUT_U8(skb, i, value);
-                }
-                nla_nest_end(skb, pfc_nest);
-        }
-
-        /* local app */
-        spin_lock(&dcb_lock);
-        app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
-        if (!app)
-                goto nla_put_failure;
-
-        list_for_each_entry(itr, &dcb_app_list, list) {
-                if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
-                        struct nlattr *app_nest = nla_nest_start(skb,
-                                                                 DCB_ATTR_APP);
-                        if (!app_nest)
-                                goto dcb_unlock;
-
-                        err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
-                                         itr->app.selector);
-                        if (err)
-                                goto dcb_unlock;
-
-                        err = nla_put_u16(skb, DCB_APP_ATTR_ID,
-                                          itr->app.protocol);
-                        if (err)
-                                goto dcb_unlock;
-
-                        err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
-                                         itr->app.priority);
-                        if (err)
-                                goto dcb_unlock;
-
-                        nla_nest_end(skb, app_nest);
-                }
-        }
-        nla_nest_end(skb, app);
-
-        if (netdev->dcbnl_ops->getdcbx)
-                dcbx = netdev->dcbnl_ops->getdcbx(netdev);
-        else
-                dcbx = -EOPNOTSUPP;
-
-        spin_unlock(&dcb_lock);
-
-        /* features flags */
-        if (ops->getfeatcfg) {
-                struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
-                if (!feat)
-                        goto nla_put_failure;
+        err = dcbnl_cee_fill(skb, netdev);
 
-                for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
-                     i++)
-                        if (!ops->getfeatcfg(netdev, i, &value))
-                                NLA_PUT_U8(skb, i, value);
-
-                nla_nest_end(skb, feat);
-        }
-
-        /* peer info if available */
-        if (ops->cee_peer_getpg) {
-                struct cee_pg pg;
-                err = ops->cee_peer_getpg(netdev, &pg);
-                if (!err)
-                        NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
-        }
-
-        if (ops->cee_peer_getpfc) {
-                struct cee_pfc pfc;
-                err = ops->cee_peer_getpfc(netdev, &pfc);
-                if (!err)
-                        NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
-        }
-
-        if (ops->peer_getappinfo && ops->peer_getapptable) {
-                err = dcbnl_build_peer_app(netdev, skb,
-                                           DCB_ATTR_CEE_PEER_APP_TABLE,
-                                           DCB_ATTR_CEE_PEER_APP_INFO,
-                                           DCB_ATTR_CEE_PEER_APP);
-                if (err)
-                        goto nla_put_failure;
-        }
-        nla_nest_end(skb, cee);
-
-        /* DCBX state */
-        if (dcbx >= 0) {
-                err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
-                if (err)
-                        goto nla_put_failure;
+        if (err < 0) {
+                nlmsg_cancel(skb, nlh);
+                nlmsg_free(skb);
+        } else {
+                nlmsg_end(skb, nlh);
+                err = rtnl_unicast(skb, net, pid);
         }
-        nlmsg_end(skb, nlh);
-        return rtnl_unicast(skb, &init_net, pid);
-
-dcb_unlock:
-        spin_unlock(&dcb_lock);
-nla_put_failure:
-        nlmsg_cancel(skb, nlh);
-nlmsg_failure:
-        nlmsg_free(skb);
         return err;
 }
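
For reference, a minimal driver-side sketch of how the newly exported
dcbnl_cee_notify() might be invoked (not part of this patch): the hook name
example_dcbx_negotiation_done() is hypothetical, and the RTM_GETDCB /
DCB_CMD_CEE_GET arguments are an assumption mirroring what dcbnl_cee_get()
reports; the patch itself leaves the event/cmd values to the calling driver.

/*
 * Hypothetical usage sketch -- not part of this patch. A driver with an
 * embedded (e.g. firmware-based) DCBX stack would call the exported helper
 * once CEE negotiation with the link peer completes, so user space sees the
 * same aggregated local + peer attributes as a DCB_CMD_CEE_GET query.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/dcbnl.h>

static void example_dcbx_negotiation_done(struct net_device *dev)
{
        /* seq and pid are 0: this is an unsolicited broadcast on the
         * RTNLGRP_DCB multicast group, not a reply to a user request.
         */
        dcbnl_cee_notify(dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
}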