Patch Detail
get:
Show a patch.
patch:
Update a patch.
put:
Update a patch.
GET /api/1.2/patches/833147/?format=api
{ "id": 833147, "url": "http://patchwork.ozlabs.org/api/1.2/patches/833147/?format=api", "web_url": "http://patchwork.ozlabs.org/project/netdev/patch/20171102003606.19913-7-david.daney@cavium.com/", "project": { "id": 7, "url": "http://patchwork.ozlabs.org/api/1.2/projects/7/?format=api", "name": "Linux network development", "link_name": "netdev", "list_id": "netdev.vger.kernel.org", "list_email": "netdev@vger.kernel.org", "web_url": null, "scm_url": null, "webscm_url": null, "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20171102003606.19913-7-david.daney@cavium.com>", "list_archive_url": null, "date": "2017-11-02T00:36:05", "name": "[6/7] netdev: octeon-ethernet: Add Cavium Octeon III support.", "commit_ref": null, "pull_url": null, "state": "changes-requested", "archived": true, "hash": "d4e6fefa5c8c98000272bf450947902c096980fb", "submitter": { "id": 8400, "url": "http://patchwork.ozlabs.org/api/1.2/people/8400/?format=api", "name": "David Daney", "email": "david.daney@cavium.com" }, "delegate": { "id": 34, "url": "http://patchwork.ozlabs.org/api/1.2/users/34/?format=api", "username": "davem", "first_name": "David", "last_name": "Miller", "email": "davem@davemloft.net" }, "mbox": "http://patchwork.ozlabs.org/project/netdev/patch/20171102003606.19913-7-david.daney@cavium.com/mbox/", "series": [ { "id": 11414, "url": "http://patchwork.ozlabs.org/api/1.2/series/11414/?format=api", "web_url": "http://patchwork.ozlabs.org/project/netdev/list/?series=11414", "date": "2017-11-02T00:35:59", "name": "Cavium OCTEON-III network driver.", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/11414/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/833147/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/833147/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<netdev-owner@vger.kernel.org>", "X-Original-To": "patchwork-incoming@ozlabs.org", "Delivered-To": 
"patchwork-incoming@ozlabs.org", "Authentication-Results": [ "ozlabs.org;\n\tspf=none (mailfrom) smtp.mailfrom=vger.kernel.org\n\t(client-ip=209.132.180.67; helo=vger.kernel.org;\n\tenvelope-from=netdev-owner@vger.kernel.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org; dkim=pass (1024-bit key;\n\tunprotected) header.d=CAVIUMNETWORKS.onmicrosoft.com\n\theader.i=@CAVIUMNETWORKS.onmicrosoft.com header.b=\"Jfn7bE1o\"; \n\tdkim-atps=neutral", "spf=none (sender IP is )\n\tsmtp.mailfrom=David.Daney@cavium.com; " ], "Received": [ "from vger.kernel.org (vger.kernel.org [209.132.180.67])\n\tby ozlabs.org (Postfix) with ESMTP id 3yS5lJ2pJKz9t39\n\tfor <patchwork-incoming@ozlabs.org>;\n\tThu, 2 Nov 2017 11:37:32 +1100 (AEDT)", "(majordomo@vger.kernel.org) by vger.kernel.org via listexpand\n\tid S934124AbdKBAhS (ORCPT <rfc822;patchwork-incoming@ozlabs.org>);\n\tWed, 1 Nov 2017 20:37:18 -0400", "from mail-sn1nam02on0040.outbound.protection.outlook.com\n\t([104.47.36.40]:52592\n\t\"EHLO NAM02-SN1-obe.outbound.protection.outlook.com\"\n\trhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP\n\tid S934081AbdKBAg5 (ORCPT <rfc822;netdev@vger.kernel.org>);\n\tWed, 1 Nov 2017 20:36:57 -0400", "from ddl.caveonetworks.com (50.233.148.156) by\n\tCY4PR07MB3496.namprd07.prod.outlook.com (10.171.252.153) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P256) id\n\t15.20.178.6; Thu, 2 Nov 2017 00:36:44 +0000" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=CAVIUMNETWORKS.onmicrosoft.com; s=selector1-cavium-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n\tbh=6/sbyd1JCD3rMvlmo9aGDPIkNEpkG6zBhooky2qhhSs=;\n\tb=Jfn7bE1ozov3udLlzz1+0Z8dP/z/OF6iVTOhdEanHPnnmavjF4NeVTSJkIbuItobg94lLkMPqMGP0eYDDatnVzPTIefKmIR8sHgwW6gk7tAHg4NrmhPgy8nd0iUSMmVBzT4NbcsqGqe6pIVQ+b2dTpRgObtiI420oYIsS8gupLc=", "From": "David Daney <david.daney@cavium.com>", "To": "linux-mips@linux-mips.org, ralf@linux-mips.org,\n\tJames Hogan 
<james.hogan@mips.com>, netdev@vger.kernel.org,\n\t\"David S. Miller\" <davem@davemloft.net>,\n\tRob Herring <robh+dt@kernel.org>, Mark Rutland <mark.rutland@arm.com>", "Cc": "linux-kernel@vger.kernel.org, \"Steven J. Hill\" <steven.hill@cavium.com>,\n\tdevicetree@vger.kernel.org, Carlos Munoz <cmunoz@cavium.com>,\n\t\"Steven J . Hill\" <Steven.Hill@cavium.com>,\n\tDavid Daney <david.daney@cavium.com>", "Subject": "[PATCH 6/7] netdev: octeon-ethernet: Add Cavium Octeon III support.", "Date": "Wed, 1 Nov 2017 17:36:05 -0700", "Message-Id": "<20171102003606.19913-7-david.daney@cavium.com>", "X-Mailer": "git-send-email 2.13.6", "In-Reply-To": "<20171102003606.19913-1-david.daney@cavium.com>", "References": "<20171102003606.19913-1-david.daney@cavium.com>", "MIME-Version": "1.0", "Content-Type": "text/plain", "X-Originating-IP": "[50.233.148.156]", "X-ClientProxiedBy": "CO2PR07CA0072.namprd07.prod.outlook.com (10.174.192.40) To\n\tCY4PR07MB3496.namprd07.prod.outlook.com (10.171.252.153)", "X-MS-PublicTrafficType": "Email", "X-MS-Office365-Filtering-Correlation-Id": "26c4a87f-f68d-4d8a-09f5-08d52189ce6a", "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(22001)(4534020)(4602075)(2017052603199);\n\tSRVR:CY4PR07MB3496; ", "X-Microsoft-Exchange-Diagnostics": [ "1; 
CY4PR07MB3496;\n\t3:Qz+mKURJZBcg+j4Sp9u1YLU2IzPG6Kr83mNNsztmlcgt/QrntYLI7fMbpFEtdOIFy9mf2y2u4wZTlr5AQCB2NTTqABz4DC48/m2EsNbi+ymR4CmqvLsep+I4M55DUvPO4no62iPG3U4XMfyin7SFt/R0dGDKhlegEEByEHcTIZBcv/1IzV+NCbFbfAkhXiWDxLK1E0p8PhslYRrcuiWGV3X+gfOpOt3FjdrVfVB9XcKITYy1teqmm+mfSPm82+L5;\n\t25:MxZvCBrCDNCJ3PxwT9//DR38Zf2Ral5V5k6+9vtOc9m2ZsTBbuinYoxSE5NxVfKuTzwhqOxzOwdkTOlExzNxGNqN2Yb/+YU4xyNOi0jX3LaaUcxh6iTQVfAk+Ywa9aElChgL44a2l7/QZ1pfMLpGASNbBE9OGyE25jbPQGRNSsgOi9A5loAXQZkYgusu14UasNsIMv4WLyM7FgVWqQojnwQtFQ5x3VuyNWfwzDKqZwqrceB8dPSYwKAV9yF3eyWK6tPwyvXxAxbSN3lrwTKy0+ENUIC8o4tAODnZ4Ps6QMvxVTBgc6cw/JozlQQ2x/j+6/KhCdN9i0sH1aWDjVprIw==;\n\t31:IJLLWynQ0bRDayMh+sIdOD85FbyjReNslXAWpI3Tcp26F0q4vzsTusuMNPhrCK8jCb+bXmWjKSjt9Q8wfYcsEpzhPacAkY05Q77Hb8U2vKOi3UX3wvJ7Gh9k27WcG9s1SLJ/AGgrwmLbjQx2xTub+iyvPU6l7r/TvqlrwRKzarOhxDpUD5ATPcyrCa7CGgQIJi4uGPtOF2rUl1lBl1VOHHb0F8yCqwMR4hwv1KzoVH4=", "1; CY4PR07MB3496;\n\t20:ihoQqQQ8Ka/BJ/YZJUzMbtkIRM263lA1JJxp0nzSrQY8ZtJJSiYR+vf3K0EtqfwYdkdHKkZ5MnLwwHpGhijWrMQGwrK1WXnkF7hXchttihY20peutOsMy3HfcGoPMr/XmocmSU8ifa+wljwKnyuAI25EXjXliKHsI/ZaXzwQ+MbZyHUJONKUQ5bdrmztYljiJ2lbylMefnPEgbhNKB5kPvT/7VYJFubZ9nG5C2kbwChw2v+X8H+kHshP1VIB5scMr2kQGlVau8ILzzeMAGJ9TpsCwEJOPJOokdxayyqNaP5fbAH4Ar9faHUXXQGN4P+S8/AmkhmzQY/x/0RlWaUb+lBQdjm6WGP0U9Es8WsmDYDrNkEpKaUbuaUnWFJ96Vj2rgRDuLO3gCGhx8x3ZZ4t9VffqQ5EAAe5Tb2FxxeF9kWUQxM4MOxdbWegqgWPx5f1wFEZ2+TIZfiUsm+xcusPvrPyttGnude6uCULHKDVhYnj4Mt8cAR25lLZKx9y8P3L;\n\t4:hXHzI8JtKKmf1EMTQqgZYfVryh3aqlDZaPMS2ZRHuvWcI5oZ3nxFUdqSPQFLup7GSE4ZEFuN5YkH1oiK63IwZaCgY/vKNcueJIvcIWJlc1mC/Z4VXQ3lbNaR+iI5csIeX4kmTK1ua/FKILHNKkDq8SJbtugXZLwAOKPQy1NLRT8YQvXDVi+KRuIN3VIoT/gpZD52mvMIeKyqVQykeDyFXnDh1oJ4yP5P60FF7p8C1eJuYh670IGt4N0ngEzLrbleDJW16FMp6zuRqBhzeq4iYet23tcksuwHF3+iR0CU92psJJKOVQ5wHHhj3FgpEF4joBAYjlmCKc145Gfe5Q2rXiv/iC35TAv33pwCkdOu8fjNJ14rzDtCqE/ijOH1IUA5", "1; 
CY4PR07MB3496;\n\t23:Dz0TzmY8SDyiW6LSr2nLPp4nAQcVzslhnRm2UsMZt9PRXDv0VYxRpu9gXedlYDkP8dD3jea7wXqtLe0A1V1rJZ2Lai2uErN42KpyJLvRHHoSKueIdaLr8DB8HHYRveD2DpMJcMtl380VX4eVRdNJs+MU2cgABjmtF3PVJf7oZ0IsBIzW5saNlztSF0PziMTEuPpmpM0pp7zkdLFEwz+yzwcpSvAY0XnOROtmxs62JRLV04ml4Sc5T8u2cxpa8gHbNAMZf+cIm39gRlcn9pYGGlzRxRu12IOuzxKS2UzB5nG9NUZk64G0QT9g+G7oyD71IXV+cRFHeKRib38ZGz5K9b0CeAKPsnx98jTvuDP6kw+GiMTS/p/aibiSiONqNNuBfPZeexYriCbbQwCFA33bX7dVMvqBW6jdGuVJi83rokumtGdc4fXY71XqrV87/wdgJoq3sFL7tXy93dmwBfJikyWuqDErj1Vs2aANNk96U1qd2l6GcLoUdU4D6zKX96A3R5XCHJxEaRc8P8MZRmRbxU6emNqSky4pw18wU9SOaiWpyZrSZjxh4CRfLGjiU4wWJXdJolm4npQK2TaTLnelER0e+2F1HSgQzbAup7tv2w8jCIZTk79uXu+kYfcB/4660wCqMHXRINEbfhmcvSTktSUFfcGb5dyrWEgVK7asdK6kwndfCSN6Xs1q7D6FeLkb/auA8WS2k1cdiKzG66wl6e/RJ1Iz2gSbmYDRhIvMB6gF13CcH/vwB0DrQ2rgSD8EqN6dJaO7k8mRs5hGnpEdgCmKcoh5OGoyrlqBz0eVJNCnOmx5mkJPsQWeLErX64raD+qntBPuGm5UJ8ggmji/CJz+Xfmtkr6Q7LwGcWdRTPvUoNr/1psgaFx6XqP996bKjziC8kielQJI1dOdfAM9ToAFA91bFhAuQqPJjT12Qw2huHgkP4U0i2x0pbBks3InWaiMCdNtMkaBHSgTcoqht/WvUQNIGOJsZgRU1q+4mJUQFsf8aNyP4B2D0GFxu6DJV0Y1Ik6HtBi8AkkFCKkYbjsdH9YD6eeOSFhjOmOOY6Gke1ScBRlg0XlRFlBoA/BD86o1V4Eu7wK/XiNsmUqLTy9jXSuBKzWNVruDUNqN6QnvKES8kFwrwH3eQNhB4wONaPT+EWUCWCz9xbs0ASPaJrRxzWVRMfO43QlSbI3k8L1FhpHQdpQxI7V5PIhad7Wqs8L6CS+HjF+9BGNHa6EaHAmnThmOM4gdBM7zB6Lu+X/SPG6tCXuNSBPmtEP7SG4S7TrAXY8KfwA+ZG3iHFx4Nz8q7wgx+OZ1iYFcrZUkRsdM3jnYvKdnQ60F0HtZTmkP87r1IGzuR0tpIqSOX4fcI/JiasGUHz7vWI7z6aUfZ3U=", "1; 
CY4PR07MB3496;\n\t6:UKQbH2bfsvKJPJF9OCXKVBlrG5fHt77peEDev3Ur0XNSG5GfTSD0Q2CeRKuwBD8niBVLTQsiw8Ysnl+OwkKUwwzKkLdvvEBwHFGG2sXIB+fUKUNOLTVj+Yu3TKG5KrmEy/r4XITagGMERntzExyWihi8oau2xb3zgxYqE0uQHve1rEShxhzOWJcSFXuR8Nfg7ZZA+mQZUI5cIoaKNFAhVL1/6z0QxAwBAHNpzbGTNHVL/d94IGbpFnEnIk6vfXSHGR+50b9UKlu5NvZVpB+19QJ4zPcIrYzCuDJIhC8G+MUVoAUej0cucOD/ak4QW0CS3doNAG9SfJnx5qPZ4Te/NZf4vZvxBH1dCAWwoOiRP0A=;\n\t5:o9wO2QoQL8lT/3+Q5qeb7m/7MBC9NymwmQBpprFkfCSfJ0ZgPsNLaBvxpE/Lw4M2IeLO6cq/RXefp9kloy2u8g9iHikejmBjvu5+jtlWh4YKahRkOsH3L18BnKwTWS02XMcE2OTQnKvvZB2MiwHTy0Vn1Xz/Yaubsb+hyRqIoYM=;\n\t24:I+INrQv9uIBKQsP95r/bbJ5q3klEyjOWuisZpGFG7haNHrgNpj83GO90Ze9PfUBIwSAWRVwVBCZNFybY3q0mYxkmUVVERkENxnPX6dWJggA=;\n\t7:G0Gb3lcKAJa+iY2w7kKzKUjc6V5Ipe5Yiu85Zb6I0Q6OzI3/GxNsQ1zl0ywl1Nd0AitqbrcORmVhP8vw5Z0XwgROp9goJDoaEoCp+qqlRg52lzs3eVOH5WCTCiRtazLUmH2xt/lSdSjDxUD7ze+EmF2+kerCmUSApiQSfczN2nQF+wEdCrsAstu/wVn6vzL/P0Eryo32qjIz/KSwMFQ2dqemcuOvyO76sEjC+fcu1CA1gToY+MGVECNrBow0KA97" ], "X-MS-TrafficTypeDiagnostic": "CY4PR07MB3496:", "X-Exchange-Antispam-Report-Test": "UriScan:(131327999870524)(21532816269658)(17755550239193); ", "X-Microsoft-Antispam-PRVS": "<CY4PR07MB3496836A932675CF1746FBF1975C0@CY4PR07MB3496.namprd07.prod.outlook.com>", "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(100000700101)(100105000095)(100000701101)(100105300095)(100000702101)(100105100095)(6040450)(2401047)(5005006)(8121501046)(10201501046)(93006095)(93001095)(3002001)(100000703101)(100105400095)(3231020)(6041248)(20161123555025)(20161123560025)(20161123562025)(20161123564025)(20161123558100)(201703131423075)(201702281528075)(201703061421075)(201703061406153)(6072148)(201708071742011)(100000704101)(100105200095)(100000705101)(100105500095);\n\tSRVR:CY4PR07MB3496; BCL:0; PCL:0;\n\tRULEID:(100000800101)(100110000095)(100000801101)(100110300095)(100000802101)(100110100095)(100000803101)(100110400095)(100000804101)(100110200095)(100000805101)(100110500095);\n\tSRVR:CY4PR07MB3496; ", "X-Forefront-PRVS": "047999FF16", 
"X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(6009001)(346002)(376002)(51234002)(189002)(199003)(25786009)(72206003)(16526018)(50226002)(53416004)(7736002)(105586002)(305945005)(101416001)(48376002)(478600001)(50986999)(6506006)(76176999)(50466002)(33646002)(47776003)(5890100001)(5003940100001)(106356001)(107886003)(551984002)(68736007)(2906002)(4326008)(6666003)(6486002)(2950100002)(6512007)(316002)(110136005)(97736004)(16586007)(54906003)(53946003)(5660300001)(53936002)(16200700003)(189998001)(3846002)(36756003)(6116002)(8936002)(66066001)(1076002)(8676002)(575784001)(81166006)(81156014)(86362001)(69596002)(559001)(569006);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:CY4PR07MB3496;\n\tH:ddl.caveonetworks.com; FPR:; SPF:None; PTR:InfoNoRecords;\n\tMX:1; A:1; LANG:en; ", "Received-SPF": "None (protection.outlook.com: cavium.com does not designate\n\tpermitted sender hosts)", "SpamDiagnosticOutput": "1:99", "SpamDiagnosticMetadata": "NSPM", "X-OriginatorOrg": "cavium.com", "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "02 Nov 2017 00:36:44.0862\n\t(UTC)", "X-MS-Exchange-CrossTenant-Network-Message-Id": "26c4a87f-f68d-4d8a-09f5-08d52189ce6a", "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted", "X-MS-Exchange-CrossTenant-Id": "711e4ccf-2e9b-4bcf-a551-4094005b6194", "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR07MB3496", "Sender": "netdev-owner@vger.kernel.org", "Precedence": "bulk", "List-ID": "<netdev.vger.kernel.org>", "X-Mailing-List": "netdev@vger.kernel.org" }, "content": "From: Carlos Munoz <cmunoz@cavium.com>\n\nThe Cavium OCTEON cn78xx and cn73xx SoCs have network packet I/O\nhardware that is significantly different from previous generations of\nthe family.\n\nAdd a new driver for this hardware. The Ethernet MAC is called BGX on\nthese devices. Common code for the MAC is in octeon3-bgx-port.c.\nFour of these BGX MACs are grouped together and managed as a group by\nocteon3-bgx-nexus.c. 
Ingress packet classification is done by the PKI\nunit initialized in octeon3-pki.c. Queue management is done in the\nSSO, initialized by octeon3-sso.c. Egress is handled by the PKO,\ninitialized in octeon3-pko.c.\n\nSigned-off-by: Carlos Munoz <cmunoz@cavium.com>\nSigned-off-by: Steven J. Hill <Steven.Hill@cavium.com>\nSigned-off-by: David Daney <david.daney@cavium.com>\n---\n drivers/net/ethernet/cavium/Kconfig | 28 +-\n drivers/net/ethernet/cavium/octeon/Makefile | 6 +\n .../net/ethernet/cavium/octeon/octeon3-bgx-nexus.c | 698 +++++++\n .../net/ethernet/cavium/octeon/octeon3-bgx-port.c | 2023 +++++++++++++++++++\n drivers/net/ethernet/cavium/octeon/octeon3-core.c | 2075 ++++++++++++++++++++\n drivers/net/ethernet/cavium/octeon/octeon3-pki.c | 833 ++++++++\n drivers/net/ethernet/cavium/octeon/octeon3-pko.c | 1719 ++++++++++++++++\n drivers/net/ethernet/cavium/octeon/octeon3-sso.c | 309 +++\n drivers/net/ethernet/cavium/octeon/octeon3.h | 411 ++++\n 9 files changed, 8101 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-bgx-nexus.c\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-bgx-port.c\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-core.c\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-pki.c\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-pko.c\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-sso.c\n create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3.h", "diff": "diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig\nindex 63be75eb34d2..366c0b238b76 100644\n--- a/drivers/net/ethernet/cavium/Kconfig\n+++ b/drivers/net/ethernet/cavium/Kconfig\n@@ -4,7 +4,7 @@\n \n config NET_VENDOR_CAVIUM\n \tbool \"Cavium ethernet drivers\"\n-\tdepends on PCI\n+\tdepends on PCI || CAVIUM_OCTEON_SOC\n \tdefault y\n \t---help---\n \t Select this option if you want enable Cavium network support.\n@@ -87,4 +87,30 @@ 
config LIQUIDIO_VF\n \t will be called liquidio_vf. MSI-X interrupt support is required\n \t for this driver to work correctly\n \n+config OCTEON3_BGX_NEXUS\n+\ttristate\n+\tdepends on CAVIUM_OCTEON_SOC\n+\n+config OCTEON3_BGX_PORT\n+\ttristate \"Cavium Octeon III BGX port support\"\n+\tdepends on CAVIUM_OCTEON_SOC\n+\tselect OCTEON3_BGX_NEXUS\n+\tselect OCTEON3_COMMON_NEXUS\n+\t---help---\n+\t This driver adds support for Cavium Octeon III BGX ports. BGX ports\n+\t support sgmii, rgmii, xaui, rxaui, xlaui, xfi, 10KR and 40KR modes.\n+\n+\t Say Y to use the management port on Octeon III boards or to use\n+\t any other ethernet port.\n+\n+config OCTEON3_ETHERNET\n+\ttristate \"Cavium OCTEON III PKI/PKO Ethernet support\"\n+\tdepends on CAVIUM_OCTEON_SOC\n+\tselect OCTEON_BGX_PORT\n+\tselect OCTEON_FPA3\n+\tselect FW_LOADER\n+\t---help---\n+\t Support for 'BGX' Ethernet via PKI/PKO units. No support\n+\t for cn70xx chips (use OCTEON_ETHERNET for cn70xx).\n+\n endif # NET_VENDOR_CAVIUM\ndiff --git a/drivers/net/ethernet/cavium/octeon/Makefile b/drivers/net/ethernet/cavium/octeon/Makefile\nindex efa41c1d91c5..1eacab1d8dad 100644\n--- a/drivers/net/ethernet/cavium/octeon/Makefile\n+++ b/drivers/net/ethernet/cavium/octeon/Makefile\n@@ -3,3 +3,9 @@\n #\n \n obj-$(CONFIG_OCTEON_MGMT_ETHERNET)\t+= octeon_mgmt.o\n+obj-$(CONFIG_OCTEON3_BGX_PORT)\t\t+= octeon3-bgx-port.o\n+obj-$(CONFIG_OCTEON3_BGX_NEXUS)\t\t+= octeon3-bgx-nexus.o\n+obj-$(CONFIG_OCTEON3_ETHERNET)\t\t+= octeon3-ethernet.o\n+\n+octeon3-ethernet-objs += octeon3-core.o octeon3-pki.o octeon3-sso.o \\\n+\t\t\t octeon3-pko.o\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3-bgx-nexus.c b/drivers/net/ethernet/cavium/octeon/octeon3-bgx-nexus.c\nnew file mode 100644\nindex 000000000000..bd6bb0090671\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3-bgx-nexus.c\n@@ -0,0 +1,698 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General 
Public\n+ * License. See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#include <linux/platform_device.h>\n+#include <linux/of_platform.h>\n+#include <linux/of_address.h>\n+#include <linux/module.h>\n+#include <linux/slab.h>\n+#include <linux/list.h>\n+#include <linux/ctype.h>\n+\n+#include \"octeon3.h\"\n+\n+static atomic_t request_mgmt_once;\n+static atomic_t load_driver_once;\n+static atomic_t pki_id;\n+\n+static char *mix_port;\n+module_param(mix_port, charp, 0444);\n+MODULE_PARM_DESC(mix_port, \"Specifies which ports connect to MIX interfaces.\");\n+\n+static char *pki_port;\n+module_param(pki_port, charp, 0444);\n+MODULE_PARM_DESC(pki_port, \"Specifies which ports connect to the PKI.\");\n+\n+#define MAX_MIX_PER_NODE\t2\n+\n+#define MAX_MIX\t\t\t(MAX_NODES * MAX_MIX_PER_NODE)\n+\n+/**\n+ * struct mix_port_lmac - Describes a lmac that connects to a mix\n+ *\t\t\t port. The lmac must be on the same node as\n+ *\t\t\t the mix.\n+ * @node:\tNode of the lmac.\n+ * @bgx:\tBgx of the lmac.\n+ * @lmac:\tLmac index.\n+ */\n+struct mix_port_lmac {\n+\tint\tnode;\n+\tint\tbgx;\n+\tint\tlmac;\n+};\n+\n+/* mix_ports_lmacs contains all the lmacs connected to mix ports */\n+static struct mix_port_lmac mix_port_lmacs[MAX_MIX];\n+\n+/* pki_ports keeps track of the lmacs connected to the pki */\n+static bool pki_ports[MAX_NODES][MAX_BGX_PER_NODE][MAX_LMAC_PER_BGX];\n+\n+/* Created platform devices get added to this list */\n+static struct list_head pdev_list;\n+static struct mutex pdev_list_lock;\n+\n+/* Created platform device use this structure to add themselves to the list */\n+struct pdev_list_item {\n+\tstruct list_head\tlist;\n+\tstruct platform_device\t*pdev;\n+};\n+\n+/**\n+ * is_lmac_to_mix - Search the list of lmacs connected to mix'es for a match.\n+ * @node: Numa node of lmac to search for.\n+ * @bgx: Bgx of lmac to search for.\n+ * @lmac: Lmac index to search for.\n+ *\n+ * Returns true if the lmac is connected to a 
mix.\n+ * Returns false if the lmac is not connected to a mix.\n+ */\n+static bool is_lmac_to_mix(int node, int bgx, int lmac)\n+{\n+\tint\ti;\n+\n+\tfor (i = 0; i < MAX_MIX; i++) {\n+\t\tif (mix_port_lmacs[i].node == node &&\n+\t\t mix_port_lmacs[i].bgx == bgx &&\n+\t\t mix_port_lmacs[i].lmac == lmac)\n+\t\t\treturn true;\n+\t}\n+\n+\treturn false;\n+}\n+\n+/**\n+ * is_lmac_to_pki - Search the list of lmacs connected to the pki for a match.\n+ * @node: Numa node of lmac to search for.\n+ * @bgx: Bgx of lmac to search for.\n+ * @lmac: Lmac index to search for.\n+ *\n+ * Returns true if the lmac is connected to the pki.\n+ * Returns false if the lmac is not connected to the pki.\n+ */\n+static bool is_lmac_to_pki(int node, int bgx, int lmac)\n+{\n+\treturn pki_ports[node][bgx][lmac];\n+}\n+\n+/**\n+ * is_lmac_to_xcv - Check if this lmac is connected to the xcv block (rgmii).\n+ * @of_node: Device node to check.\n+ *\n+ * Returns true if the lmac is connected to the xcv port.\n+ * Returns false if the lmac is not connected to the xcv port.\n+ */\n+static bool is_lmac_to_xcv(struct device_node *of_node)\n+{\n+\treturn of_device_is_compatible(of_node, \"cavium,octeon-7360-xcv\");\n+}\n+\n+static int bgx_probe(struct platform_device *pdev)\n+{\n+\tstruct mac_platform_data platform_data;\n+\tconst __be32 *reg;\n+\tu32 port;\n+\tu64 addr;\n+\tstruct device_node *child;\n+\tstruct platform_device *new_dev;\n+\tstruct platform_device *pki_dev;\n+\tint numa_node, interface;\n+\tint i;\n+\tint r = 0;\n+\tchar id[64];\n+\tu64 data;\n+\n+\treg = of_get_property(pdev->dev.of_node, \"reg\", NULL);\n+\taddr = of_translate_address(pdev->dev.of_node, reg);\n+\tinterface = (addr >> 24) & 0xf;\n+\tnuma_node = (addr >> 36) & 0x7;\n+\n+\t/* Assign 8 CAM entries per LMAC */\n+\tfor (i = 0; i < 32; i++) {\n+\t\tdata = i >> 3;\n+\t\toct_csr_write(data, BGX_CMR_RX_ADRX_CAM(numa_node, interface, i));\n+\t}\n+\n+\tfor_each_available_child_of_node(pdev->dev.of_node, child) {\n+\t\tbool is_mix 
= false;\n+\t\tbool is_pki = false;\n+\t\tbool is_xcv = false;\n+\t\tstruct pdev_list_item *pdev_item;\n+\n+\t\tif (!of_device_is_compatible(child, \"cavium,octeon-7890-bgx-port\") &&\n+\t\t !of_device_is_compatible(child, \"cavium,octeon-7360-xcv\"))\n+\t\t\tcontinue;\n+\t\tr = of_property_read_u32(child, \"reg\", &port);\n+\t\tif (r)\n+\t\t\treturn -ENODEV;\n+\n+\t\tis_mix = is_lmac_to_mix(numa_node, interface, port);\n+\t\tis_pki = is_lmac_to_pki(numa_node, interface, port);\n+\t\tis_xcv = is_lmac_to_xcv(child);\n+\n+\t\t/* Check if this port should be configured */\n+\t\tif (!is_mix && !is_pki)\n+\t\t\tcontinue;\n+\n+\t\t/* Connect to PKI/PKO */\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(numa_node, interface, port));\n+\t\tif (is_mix)\n+\t\t\tdata |= BIT(11);\n+\t\telse\n+\t\t\tdata &= ~BIT(11);\n+\t\toct_csr_write(data, BGX_CMR_CONFIG(numa_node, interface, port));\n+\n+\t\t/* Unreset the mix bgx interface or it will interfare with the\n+\t\t * other ports.\n+\t\t */\n+\t\tif (is_mix) {\n+\t\t\tdata = oct_csr_read(BGX_CMR_GLOBAL_CONFIG(numa_node, interface));\n+\t\t\tif (!port)\n+\t\t\t\tdata &= ~BIT(3);\n+\t\t\telse if (port == 1)\n+\t\t\t\tdata &= ~BIT(4);\n+\t\t\toct_csr_write(data, BGX_CMR_GLOBAL_CONFIG(numa_node, interface));\n+\t\t}\n+\n+\t\tsnprintf(id, sizeof(id), \"%llx.%u.ethernet-mac\",\n+\t\t\t (unsigned long long)addr, port);\n+\t\tnew_dev = of_platform_device_create(child, id, &pdev->dev);\n+\t\tif (!new_dev) {\n+\t\t\tdev_err(&pdev->dev, \"Error creating %s\\n\", id);\n+\t\t\tcontinue;\n+\t\t}\n+\t\tplatform_data.mac_type = BGX_MAC;\n+\t\tplatform_data.numa_node = numa_node;\n+\t\tplatform_data.interface = interface;\n+\t\tplatform_data.port = port;\n+\t\tif (is_xcv)\n+\t\t\tplatform_data.src_type = XCV;\n+\t\telse\n+\t\t\tplatform_data.src_type = QLM;\n+\n+\t\t/* Add device to the list of created devices so we can remove it\n+\t\t * on exit.\n+\t\t */\n+\t\tpdev_item = kmalloc(sizeof(*pdev_item), GFP_KERNEL);\n+\t\tpdev_item->pdev = 
new_dev;\n+\t\tmutex_lock(&pdev_list_lock);\n+\t\tlist_add(&pdev_item->list, &pdev_list);\n+\t\tmutex_unlock(&pdev_list_lock);\n+\n+\t\ti = atomic_inc_return(&pki_id);\n+\t\tpki_dev = platform_device_register_data(&new_dev->dev,\n+\t\t\t\t\t\t\tis_mix ? \"octeon_mgmt\" : \"ethernet-mac-pki\",\n+\t\t\t\t\t\t\ti, &platform_data, sizeof(platform_data));\n+\t\tdev_info(&pdev->dev, \"Created %s %u: %p\\n\",\n+\t\t\t is_mix ? \"MIX\" : \"PKI\", pki_dev->id, pki_dev);\n+\n+\t\t/* Add device to the list of created devices so we can remove it\n+\t\t * on exit.\n+\t\t */\n+\t\tpdev_item = kmalloc(sizeof(*pdev_item), GFP_KERNEL);\n+\t\tpdev_item->pdev = pki_dev;\n+\t\tmutex_lock(&pdev_list_lock);\n+\t\tlist_add(&pdev_item->list, &pdev_list);\n+\t\tmutex_unlock(&pdev_list_lock);\n+\n+#ifdef CONFIG_NUMA\n+\t\tnew_dev->dev.numa_node = pdev->dev.numa_node;\n+\t\tpki_dev->dev.numa_node = pdev->dev.numa_node;\n+#endif\n+\t\t/* One time request driver module */\n+\t\tif (is_mix) {\n+\t\t\tif (atomic_cmpxchg(&request_mgmt_once, 0, 1) == 0)\n+\t\t\t\trequest_module_nowait(\"octeon_mgmt\");\n+\t\t}\n+\t\tif (is_pki) {\n+\t\t\tif (atomic_cmpxchg(&load_driver_once, 0, 1) == 0)\n+\t\t\t\trequest_module_nowait(\"octeon3-ethernet\");\n+\t\t}\n+\t}\n+\n+\tdev_info(&pdev->dev, \"Probed\\n\");\n+\treturn 0;\n+}\n+\n+/**\n+ * bgx_mix_init_from_fdt - Initialize the list of lmacs that connect to mix\n+ *\t\t\t ports from information in the device tree.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static int bgx_mix_init_from_fdt(void)\n+{\n+\tstruct device_node\t*node;\n+\tstruct device_node\t*parent = NULL;\n+\tint\t\t\tmix = 0;\n+\n+\tfor_each_compatible_node(node, NULL, \"cavium,octeon-7890-mix\") {\n+\t\tstruct device_node\t*lmac_fdt_node;\n+\t\tconst __be32\t\t*reg;\n+\t\tu64\t\t\taddr;\n+\n+\t\t/* Get the fdt node of the lmac connected to this mix */\n+\t\tlmac_fdt_node = of_parse_phandle(node, \"cavium,mac-handle\", 0);\n+\t\tif (!lmac_fdt_node)\n+\t\t\tgoto 
err;\n+\n+\t\t/* Get the numa node and bgx of the lmac */\n+\t\tparent = of_get_parent(lmac_fdt_node);\n+\t\tif (!parent)\n+\t\t\tgoto err;\n+\t\treg = of_get_property(parent, \"reg\", NULL);\n+\t\tif (!reg)\n+\t\t\tgoto err;\n+\t\taddr = of_translate_address(parent, reg);\n+\t\tof_node_put(parent);\n+\t\tparent = NULL;\n+\n+\t\tmix_port_lmacs[mix].node = (addr >> 36) & 0x7;\n+\t\tmix_port_lmacs[mix].bgx = (addr >> 24) & 0xf;\n+\n+\t\t/* Get the lmac index */\n+\t\treg = of_get_property(lmac_fdt_node, \"reg\", NULL);\n+\t\tif (!reg)\n+\t\t\tgoto err;\n+\n+\t\tmix_port_lmacs[mix].lmac = *reg;\n+\n+\t\tmix++;\n+\t\tif (mix >= MAX_MIX)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+ err:\n+\tpr_warn(\"Invalid device tree mix port information\\n\");\n+\tfor (mix = 0; mix < MAX_MIX; mix++) {\n+\t\tmix_port_lmacs[mix].node = -1;\n+\t\tmix_port_lmacs[mix].bgx = -1;\n+\t\tmix_port_lmacs[mix].lmac = -1;\n+\t}\n+\tif (parent)\n+\t\tof_node_put(parent);\n+\n+\treturn -EINVAL;\n+}\n+\n+/**\n+ * bgx_mix_init_from_param - Initialize the list of lmacs that connect to mix\n+ *\t\t\t ports from information in the \"mix_port\" parameter.\n+ *\t\t\t The mix_port parameter format is as follows:\n+ *\t\t\t mix_port=nbl\n+ *\t\t\t where:\n+ *\t\t\t\tn = node\n+ *\t\t\t\tb = bgx\n+ *\t\t\t\tl = lmac\n+ *\t\t\t There can be up to 4 lmacs defined separated by\n+ *\t\t\t commas. 
For example to select node0, bgx0, lmac0\n+ *\t\t\t and node0, bgx4, lamc0, the mix_port parameter\n+ *\t\t\t would be: mix_port=000,040\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static int bgx_mix_init_from_param(void)\n+{\n+\tchar\t*p = mix_port;\n+\tint\tmix = 0;\n+\tint\ti;\n+\n+\twhile (*p) {\n+\t\tint\tnode = -1;\n+\t\tint\tbgx = -1;\n+\t\tint\tlmac = -1;\n+\n+\t\tif (strlen(p) < 3)\n+\t\t\tgoto err;\n+\n+\t\t/* Get the numa node */\n+\t\tif (!isdigit(*p))\n+\t\t\tgoto err;\n+\t\tnode = *p - '0';\n+\t\tif (node >= MAX_NODES)\n+\t\t\tgoto err;\n+\n+\t\t/* Get the bgx */\n+\t\tp++;\n+\t\tif (!isdigit(*p))\n+\t\t\tgoto err;\n+\t\tbgx = *p - '0';\n+\t\tif (bgx >= MAX_BGX_PER_NODE)\n+\t\t\tgoto err;\n+\n+\t\t/* Get the lmac index */\n+\t\tp++;\n+\t\tif (!isdigit(*p))\n+\t\t\tgoto err;\n+\t\tlmac = *p - '0';\n+\t\tif (lmac >= 2)\n+\t\t\tgoto err;\n+\n+\t\t/* Only one lmac0 and one lmac1 per node is supported */\n+\t\tfor (i = 0; i < MAX_MIX; i++) {\n+\t\t\tif (mix_port_lmacs[i].node == node &&\n+\t\t\t mix_port_lmacs[i].lmac == lmac)\n+\t\t\t\tgoto err;\n+\t\t}\n+\n+\t\tmix_port_lmacs[mix].node = node;\n+\t\tmix_port_lmacs[mix].bgx = bgx;\n+\t\tmix_port_lmacs[mix].lmac = lmac;\n+\n+\t\tp++;\n+\t\tif (*p == ',')\n+\t\t\tp++;\n+\n+\t\tmix++;\n+\t\tif (mix >= MAX_MIX)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+ err:\n+\tpr_warn(\"Invalid parameter mix_port=%s\\n\", mix_port);\n+\tfor (mix = 0; mix < MAX_MIX; mix++) {\n+\t\tmix_port_lmacs[mix].node = -1;\n+\t\tmix_port_lmacs[mix].bgx = -1;\n+\t\tmix_port_lmacs[mix].lmac = -1;\n+\t}\n+\treturn -EINVAL;\n+}\n+\n+/**\n+ * bgx_mix_port_lmacs_init - Initialize the mix_port_lmacs variable with the\n+ *\t\t\t lmacs that connect to mic ports.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static int bgx_mix_port_lmacs_init(void)\n+{\n+\tint\tmix;\n+\n+\t/* Start with no mix ports configured */\n+\tfor (mix = 0; mix < MAX_MIX; mix++) {\n+\t\tmix_port_lmacs[mix].node = 
-1;\n+\t\tmix_port_lmacs[mix].bgx = -1;\n+\t\tmix_port_lmacs[mix].lmac = -1;\n+\t}\n+\n+\t/* Check if no mix port should be configured */\n+\tif (mix_port && !strcmp(mix_port, \"none\"))\n+\t\treturn 0;\n+\n+\t/* Configure the mix ports using information from the device tree if no\n+\t * parameter was passed. Otherwise, use the information in the module\n+\t * parameter.\n+\t */\n+\tif (!mix_port)\n+\t\tbgx_mix_init_from_fdt();\n+\telse\n+\t\tbgx_mix_init_from_param();\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * bgx_parse_pki_elem - Parse a single element (node, bgx, or lmac) out a pki\n+ *\t\t\tlmac string and set its bitmap accordingly.\n+ * @str: Pki lmac string to parse.\n+ * @bitmap: Updated with the bits selected by str.\n+ * @size: Maximum size of the bitmap.\n+ *\n+ * Returns number of characters processed from str.\n+ * Returns <0 for error codes.\n+ */\n+static int bgx_parse_pki_elem(const char *str, unsigned long *bitmap, int size)\n+{\n+\tconst char\t*p = str;\n+\tint\t\tlen = -1;\n+\tint\t\tbit;\n+\n+\tif (*p == 0) {\n+\t\t/* If identifier is missing, the whole subset is allowed */\n+\t\tbitmap_set(bitmap, 0, size);\n+\t\tlen = 0;\n+\t} else if (*p == '*') {\n+\t\t/* If identifier is an asterisk, the whole subset is allowed */\n+\t\tbitmap_set(bitmap, 0, size);\n+\t\tlen = 1;\n+\t} else if (isdigit(*p)) {\n+\t\t/* If identifier is a digit, only the bit corresponding to the\n+\t\t * digit is set.\n+\t\t */\n+\t\tbit = *p - '0';\n+\t\tif (bit < size) {\n+\t\t\tbitmap_set(bitmap, bit, 1);\n+\t\t\tlen = 1;\n+\t\t}\n+\t} else if (*p == '[') {\n+\t\t/* If identifier is a bracket, all the bits corresponding to\n+\t\t * the digits inside the bracket are set.\n+\t\t */\n+\t\tp++;\n+\t\tlen = 1;\n+\t\tdo {\n+\t\t\tif (isdigit(*p)) {\n+\t\t\t\tbit = *p - '0';\n+\t\t\t\tif (bit < size)\n+\t\t\t\t\tbitmap_set(bitmap, bit, 1);\n+\t\t\t\telse\n+\t\t\t\t\treturn -1;\n+\t\t\t} else {\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tp++;\n+\t\t\tlen++;\n+\t\t} while (*p != 
']');\n+\t\tlen++;\n+\t} else {\n+\t\tlen = -1;\n+\t}\n+\n+\treturn len;\n+}\n+\n+/**\n+ * bgx_pki_bitmap_set - Set the bitmap bits for all elements (node, bgx, and\n+ *\t\t\tlmac) selected by a pki lmac string.\n+ * @str: Pki lmac string to process.\n+ * @node: Updated with the nodes specified in the pki lmac string.\n+ * @bgx: Updated with the bgx's specified in the pki lmac string.\n+ * @lmac: Updated with the lmacs specified in the pki lmac string.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static unsigned long bgx_pki_bitmap_set(const char *str, unsigned long *node,\n+\t\t\t\t\tunsigned long *bgx, unsigned long *lmac)\n+{\n+\tconst char\t*p = str;\n+\tint\t\tlen;\n+\n+\t/* Parse the node */\n+\tlen = bgx_parse_pki_elem(p, node, MAX_NODES);\n+\tif (len < 0)\n+\t\tgoto err;\n+\n+\t/* Parse the bgx */\n+\tp += len;\n+\tlen = bgx_parse_pki_elem(p, bgx, MAX_BGX_PER_NODE);\n+\tif (len < 0)\n+\t\tgoto err;\n+\n+\t/* Parse the lmac */\n+\tp += len;\n+\tlen = bgx_parse_pki_elem(p, lmac, MAX_LMAC_PER_BGX);\n+\tif (len < 0)\n+\t\tgoto err;\n+\n+\treturn 0;\n+ err:\n+\tbitmap_zero(node, MAX_NODES);\n+\tbitmap_zero(bgx, MAX_BGX_PER_NODE);\n+\tbitmap_zero(lmac, MAX_LMAC_PER_BGX);\n+\treturn len;\n+}\n+\n+/**\n+ * bgx_pki_init_from_param - Initialize the list of lmacs that connect to the\n+ *\t\t\t pki from information in the \"pki_port\" parameter.\n+ *\n+ *\t\t\t The pki_port parameter format is as follows:\n+ *\t\t\t pki_port=nbl\n+ *\t\t\t where:\n+ *\t\t\t\tn = node\n+ *\t\t\t\tb = bgx\n+ *\t\t\t\tl = lmac\n+ *\n+ *\t\t\t Commas must be used to separate multiple lmacs:\n+ *\t\t\t pki_port=000,100,110\n+ *\n+ *\t\t\t Asterisks (*) specify all possible characters in\n+ *\t\t\t the subset:\n+ *\t\t\t pki_port=00* (all lmacs of node0 bgx0).\n+ *\n+ *\t\t\t Missing lmacs identifiers default to all\n+ *\t\t\t possible characters in the subset:\n+ *\t\t\t pki_port=00 (all lmacs on node0 bgx0)\n+ *\n+ *\t\t\t Brackets ('[' and ']') specify the 
valid\n+ *\t\t\t characters in the subset:\n+ *\t\t\t pki_port=00[01] (lmac0 and lmac1 of node0 bgx0).\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static int bgx_pki_init_from_param(void)\n+{\n+\tchar\t*cur;\n+\tchar\t*next;\n+\tDECLARE_BITMAP(node_bitmap, MAX_NODES);\n+\tDECLARE_BITMAP(bgx_bitmap, MAX_BGX_PER_NODE);\n+\tDECLARE_BITMAP(lmac_bitmap, MAX_LMAC_PER_BGX);\n+\n+\t/* Parse each comma separated lmac specifier */\n+\tcur = pki_port;\n+\twhile (cur) {\n+\t\tunsigned long\tnode;\n+\t\tunsigned long\tbgx;\n+\t\tunsigned long\tlmac;\n+\n+\t\tbitmap_zero(node_bitmap, BITS_PER_LONG);\n+\t\tbitmap_zero(bgx_bitmap, BITS_PER_LONG);\n+\t\tbitmap_zero(lmac_bitmap, BITS_PER_LONG);\n+\n+\t\tnext = strchr(cur, ',');\n+\t\tif (next)\n+\t\t\t*next++ = '\\0';\n+\n+\t\t/* Convert the specifier into a bitmap */\n+\t\tbgx_pki_bitmap_set(cur, node_bitmap, bgx_bitmap, lmac_bitmap);\n+\n+\t\t/* Mark the lmacs to be connected to the pki */\n+\t\tfor_each_set_bit(node, node_bitmap, MAX_NODES) {\n+\t\t\tfor_each_set_bit(bgx, bgx_bitmap, MAX_BGX_PER_NODE) {\n+\t\t\t\tfor_each_set_bit(lmac, lmac_bitmap,\n+\t\t\t\t\t\t MAX_LMAC_PER_BGX)\n+\t\t\t\t\tpki_ports[node][bgx][lmac] = true;\n+\t\t\t}\n+\t\t}\n+\n+\t\tcur = next;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * bgx_pki_ports_init - Initialize the pki_ports variable with the lmacs that\n+ *\t\t\tconnect to the pki.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static int bgx_pki_ports_init(void)\n+{\n+\tint\ti, j, k;\n+\tbool\tdef_val;\n+\n+\t/* Whether all ports default to connect to the pki or not depend on the\n+\t * passed module parameter (if any).\n+\t */\n+\tif (pki_port)\n+\t\tdef_val = false;\n+\telse\n+\t\tdef_val = true;\n+\n+\tfor (i = 0; i < MAX_NODES; i++) {\n+\t\tfor (j = 0; j < MAX_BGX_PER_NODE; j++) {\n+\t\t\tfor (k = 0; k < MAX_LMAC_PER_BGX; k++)\n+\t\t\t\tpki_ports[i][j][k] = def_val;\n+\t\t}\n+\t}\n+\n+\t/* Check if ports have to be individually configured 
*/\n+\tif (pki_port && strcmp(pki_port, \"none\"))\n+\t\tbgx_pki_init_from_param();\n+\n+\treturn 0;\n+}\n+\n+static int bgx_remove(struct platform_device *pdev)\n+{\n+\treturn 0;\n+}\n+\n+static void bgx_shutdown(struct platform_device *pdev)\n+{\n+}\n+\n+static const struct of_device_id bgx_match[] = {\n+\t{\n+\t\t.compatible = \"cavium,octeon-7890-bgx\",\n+\t},\n+\t{},\n+};\n+MODULE_DEVICE_TABLE(of, bgx_match);\n+\n+static struct platform_driver bgx_driver = {\n+\t.probe\t\t= bgx_probe,\n+\t.remove\t\t= bgx_remove,\n+\t.shutdown = bgx_shutdown,\n+\t.driver\t\t= {\n+\t\t.owner\t= THIS_MODULE,\n+\t\t.name\t= KBUILD_MODNAME,\n+\t\t.of_match_table = bgx_match,\n+\t},\n+};\n+\n+/* Allow bgx_port driver to force this driver to load */\n+void bgx_nexus_load(void)\n+{\n+}\n+EXPORT_SYMBOL(bgx_nexus_load);\n+\n+static int __init bgx_driver_init(void)\n+{\n+\tint r;\n+\n+\tINIT_LIST_HEAD(&pdev_list);\n+\tmutex_init(&pdev_list_lock);\n+\n+\tbgx_mix_port_lmacs_init();\n+\tbgx_pki_ports_init();\n+\n+\tr = platform_driver_register(&bgx_driver);\n+\n+\treturn r;\n+}\n+\n+static void __exit bgx_driver_exit(void)\n+{\n+\tstruct pdev_list_item *pdev_item;\n+\n+\tmutex_lock(&pdev_list_lock);\n+\twhile (!list_empty(&pdev_list)) {\n+\t\tpdev_item = list_first_entry(&pdev_list, struct pdev_list_item, list);\n+\t\tlist_del(&pdev_item->list);\n+\t\tplatform_device_unregister(pdev_item->pdev);\n+\t\tkfree(pdev_item);\n+\t}\n+\tmutex_unlock(&pdev_list_lock);\n+\n+\tplatform_driver_unregister(&bgx_driver);\n+}\n+\n+module_init(bgx_driver_init);\n+module_exit(bgx_driver_exit);\n+\n+MODULE_LICENSE(\"GPL\");\n+MODULE_AUTHOR(\"Cavium, Inc. <support@caviumnetworks.com>\");\n+MODULE_DESCRIPTION(\"Cavium, Inc. 
BGX MAC Nexus driver.\");\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3-bgx-port.c b/drivers/net/ethernet/cavium/octeon/octeon3-bgx-port.c\nnew file mode 100644\nindex 000000000000..fb801b7f87fe\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3-bgx-port.c\n@@ -0,0 +1,2023 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General Public\n+ * License. See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#include <linux/platform_device.h>\n+#include <linux/netdevice.h>\n+#include <linux/etherdevice.h>\n+#include <linux/of_platform.h>\n+#include <linux/of_address.h>\n+#include <linux/of_mdio.h>\n+#include <linux/of_net.h>\n+#include <linux/module.h>\n+#include <linux/slab.h>\n+#include <linux/list.h>\n+\n+#include <asm/octeon/octeon.h>\n+\n+#include \"octeon3.h\"\n+\n+struct bgx_port_priv {\n+\tint node;\n+\tint bgx;\n+\tint index; /* Port index on BGX block*/\n+\tenum port_mode mode;\n+\tint pknd;\n+\tint qlm;\n+\tconst u8 *mac_addr;\n+\tstruct phy_device *phydev;\n+\tstruct device_node *phy_np;\n+\tbool mode_1000basex;\n+\tbool bgx_as_phy;\n+\tstruct net_device *netdev;\n+\tstruct mutex lock;\t/* Serializes delayed work */\n+\tstruct port_status (*get_link)(struct bgx_port_priv *priv);\n+\tint (*set_link)(struct bgx_port_priv *priv, struct port_status status);\n+\tstruct port_status last_status;\n+\tstruct delayed_work dwork;\n+\tbool work_queued;\n+};\n+\n+/* lmac_pknd keeps track of the port kinds assigned to the lmacs */\n+static int lmac_pknd[MAX_NODES][MAX_BGX_PER_NODE][MAX_LMAC_PER_BGX];\n+\n+static struct workqueue_struct *check_state_wq;\n+static DEFINE_MUTEX(check_state_wq_mutex);\n+\n+int bgx_port_get_qlm(int node, int bgx, int index)\n+{\n+\tu64\tdata;\n+\tint\tqlm = -1;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX)) {\n+\t\tif (bgx < 2) {\n+\t\t\tdata = oct_csr_read(BGX_CMR_GLOBAL_CONFIG(node, bgx));\n+\t\t\tif (data & 
1)\n+\t\t\t\tqlm = bgx + 2;\n+\t\t\telse\n+\t\t\t\tqlm = bgx;\n+\t\t} else {\n+\t\t\tqlm = bgx + 2;\n+\t\t}\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {\n+\t\tif (bgx < 2) {\n+\t\t\tqlm = bgx + 2;\n+\t\t} else {\n+\t\t\t/* Ports on bgx2 can be connected to qlm5 or qlm6 */\n+\t\t\tif (index < 2)\n+\t\t\t\tqlm = 5;\n+\t\t\telse\n+\t\t\t\tqlm = 6;\n+\t\t}\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {\n+\t\t/* Ports on bgx0 can be connected to qlm4 or qlm5 */\n+\t\tif (index < 2)\n+\t\t\tqlm = 4;\n+\t\telse\n+\t\t\tqlm = 5;\n+\t}\n+\n+\treturn qlm;\n+}\n+EXPORT_SYMBOL(bgx_port_get_qlm);\n+\n+/* Returns the mode of the bgx port */\n+enum port_mode bgx_port_get_mode(int node, int bgx, int index)\n+{\n+\tenum port_mode\tmode;\n+\tu64\t\tdata;\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(node, bgx, index));\n+\n+\tswitch ((data >> 8) & 7) {\n+\tcase 0:\n+\t\tmode = PORT_MODE_SGMII;\n+\t\tbreak;\n+\tcase 1:\n+\t\tmode = PORT_MODE_XAUI;\n+\t\tbreak;\n+\tcase 2:\n+\t\tmode = PORT_MODE_RXAUI;\n+\t\tbreak;\n+\tcase 3:\n+\t\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(node, bgx, index));\n+\t\t/* The use of training differentiates 10G_KR from xfi */\n+\t\tif (data & BIT(1))\n+\t\t\tmode = PORT_MODE_10G_KR;\n+\t\telse\n+\t\t\tmode = PORT_MODE_XFI;\n+\t\tbreak;\n+\tcase 4:\n+\t\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(node, bgx, index));\n+\t\t/* The use of training differentiates 40G_KR4 from xlaui */\n+\t\tif (data & BIT(1))\n+\t\t\tmode = PORT_MODE_40G_KR4;\n+\t\telse\n+\t\t\tmode = PORT_MODE_XLAUI;\n+\t\tbreak;\n+\tcase 5:\n+\t\tmode = PORT_MODE_RGMII;\n+\t\tbreak;\n+\tdefault:\n+\t\tmode = PORT_MODE_DISABLED;\n+\t\tbreak;\n+\t}\n+\n+\treturn mode;\n+}\n+EXPORT_SYMBOL(bgx_port_get_mode);\n+\n+int bgx_port_allocate_pknd(int node)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\tint\t\t\t\tpknd;\n+\n+\tstrncpy((char *)&tag.lo, \"cvm_pknd\", 8);\n+\tsnprintf(buf, 16, \"_%d......\", node);\n+\tmemcpy(&tag.hi, buf, 8);\n+\n+\tres_mgr_create_resource(tag, 
64);\n+\tpknd = res_mgr_alloc(tag, -1, false);\n+\tif (pknd < 0) {\n+\t\tpr_err(\"bgx-port: Failed to allocate pknd\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\treturn pknd;\n+}\n+EXPORT_SYMBOL(bgx_port_allocate_pknd);\n+\n+int bgx_port_get_pknd(int node, int bgx, int index)\n+{\n+\treturn lmac_pknd[node][bgx][index];\n+}\n+EXPORT_SYMBOL(bgx_port_get_pknd);\n+\n+/* GSER-20075 */\n+static void bgx_port_gser_20075(struct bgx_port_priv\t*priv,\n+\t\t\t\tint\t\t\tqlm,\n+\t\t\t\tint\t\t\tlane)\n+{\n+\tu64\tdata;\n+\tu64\taddr;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&\n+\t (lane == -1 || lane == 3)) {\n+\t\t/* Enable software control */\n+\t\taddr = GSER_BR_RX_CTL(priv->node, qlm, 3);\n+\t\tdata = oct_csr_read(addr);\n+\t\tdata |= BIT(2);\n+\t\toct_csr_write(data, addr);\n+\n+\t\t/* Clear the completion flag */\n+\t\taddr = GSER_BR_RX_EER(priv->node, qlm, 3);\n+\t\tdata = oct_csr_read(addr);\n+\t\tdata &= ~BIT(14);\n+\t\toct_csr_write(data, addr);\n+\n+\t\t/* Initiate a new request on lane 2 */\n+\t\tif (lane == 3) {\n+\t\t\taddr = GSER_BR_RX_EER(priv->node, qlm, 2);\n+\t\t\tdata = oct_csr_read(addr);\n+\t\t\tdata |= BIT(15);\n+\t\t\toct_csr_write(data, addr);\n+\t\t}\n+\t}\n+}\n+\n+static void bgx_common_init_pknd(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\tnum_ports;\n+\n+\t/* Setup pkind */\n+\tpriv->pknd = bgx_port_allocate_pknd(priv->node);\n+\tlmac_pknd[priv->node][priv->bgx][priv->index] = priv->pknd;\n+\tdata = oct_csr_read(BGX_CMR_RX_ID_MAP(priv->node, priv->bgx, priv->index));\n+\tdata &= ~GENMASK_ULL(7, 0);\n+\tdata |= priv->pknd;\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX)) {\n+\t\t/* Change the default reassembly id (max allowed is 14) */\n+\t\tdata &= ~GENMASK_ULL(14, 8);\n+\t\tdata |= ((4 * priv->bgx) + 2 + priv->index) << 8;\n+\t}\n+\toct_csr_write(data, BGX_CMR_RX_ID_MAP(priv->node, priv->bgx, priv->index));\n+\n+\t/* Set backpressure channel mask AND/OR registers */\n+\tdata = oct_csr_read(BGX_CMR_CHAN_MSK_AND(priv->node, 
priv->bgx));\n+\tdata |= 0xffff << (16 * priv->index);\n+\toct_csr_write(data, BGX_CMR_CHAN_MSK_AND(priv->node, priv->bgx));\n+\n+\tdata = oct_csr_read(BGX_CMR_CHAN_MSK_OR(priv->node, priv->bgx));\n+\tdata |= 0xffff << (16 * priv->index);\n+\toct_csr_write(data, BGX_CMR_CHAN_MSK_OR(priv->node, priv->bgx));\n+\n+\t/* Rx back pressure watermark:\n+\t * Set to 1/4 of the available lmacs buffer (in multiple of 16 bytes)\n+\t */\n+\tdata = oct_csr_read(BGX_CMR_TX_LMACS(priv->node, priv->bgx));\n+\tnum_ports = data & 7;\n+\tdata = BGX_RX_FIFO_SIZE / (num_ports * 4 * 16);\n+\toct_csr_write(data, BGX_CMR_RX_BP_ON(priv->node, priv->bgx, priv->index));\n+}\n+\n+static int bgx_xgmii_hardware_init(struct bgx_port_priv *priv)\n+{\n+\tu64\tclock_mhz;\n+\tu64\tdata;\n+\tu64\tctl;\n+\n+\t/* Set TX Threshold */\n+\tdata = 0x20;\n+\toct_csr_write(data, BGX_GMP_GMI_TX_THRESH(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\tdata &= ~(BIT(8) | BIT(9));\n+\tif (priv->mode_1000basex)\n+\t\tdata |= BIT(8);\n+\tif (priv->bgx_as_phy)\n+\t\tdata |= BIT(9);\n+\toct_csr_write(data, BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_GMP_PCS_LINK_TIMER(priv->node, priv->bgx, priv->index));\n+\tclock_mhz = octeon_get_io_clock_rate() / 1000000;\n+\tif (priv->mode_1000basex)\n+\t\tdata = (10000ull * clock_mhz) >> 10;\n+\telse\n+\t\tdata = (1600ull * clock_mhz) >> 10;\n+\toct_csr_write(data, BGX_GMP_PCS_LINK_TIMER(priv->node, priv->bgx, priv->index));\n+\n+\tif (priv->mode_1000basex) {\n+\t\tdata = oct_csr_read(BGX_GMP_PCS_AN_ADV(priv->node, priv->bgx, priv->index));\n+\t\tdata &= ~(GENMASK_ULL(13, 12) | GENMASK_ULL(8, 7));\n+\t\tdata |= 3 << 7;\n+\t\tdata |= BIT(6) | BIT(5);\n+\t\toct_csr_write(data, BGX_GMP_PCS_AN_ADV(priv->node, priv->bgx, priv->index));\n+\t} else if (priv->bgx_as_phy) {\n+\t\tdata = oct_csr_read(BGX_GMP_PCS_SGM_AN_ADV(priv->node, priv->bgx, 
priv->index));\n+\t\tdata |= BIT(12);\n+\t\tdata &= ~(GENMASK_ULL(11, 10));\n+\t\tdata |= 2 << 10;\n+\t\toct_csr_write(data, BGX_GMP_PCS_SGM_AN_ADV(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\tdata = oct_csr_read(BGX_GMP_GMI_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\tctl = oct_csr_read(BGX_GMP_GMI_TX_SGMII_CTL(priv->node, priv->bgx, priv->index));\n+\tctl &= ~BIT(0);\n+\tctl |= (data & BIT(0)) ? 0 : 1;\n+\toct_csr_write(ctl, BGX_GMP_GMI_TX_SGMII_CTL(priv->node, priv->bgx, priv->index));\n+\n+\tif (priv->mode == PORT_MODE_RGMII) {\n+\t\t/* Disable XCV interface when initialized */\n+\t\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\t\tdata &= ~(BIT(63) | BIT(3) | BIT(1));\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int bgx_get_tx_fifo_size(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\tnum_ports;\n+\n+\tdata = oct_csr_read(BGX_CMR_TX_LMACS(priv->node, priv->bgx));\n+\tnum_ports = data & 7;\n+\n+\tswitch (num_ports) {\n+\tcase 1:\n+\t\treturn BGX_TX_FIFO_SIZE;\n+\tcase 2:\n+\t\treturn BGX_TX_FIFO_SIZE / 2;\n+\tcase 3:\n+\tcase 4:\n+\t\treturn BGX_TX_FIFO_SIZE / 4;\n+\tdefault:\n+\t\treturn 0;\n+\t}\n+}\n+\n+static int bgx_xaui_hardware_init(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tu64\tclock_mhz;\n+\tu64\ttx_fifo_size;\n+\n+\tif (octeon_is_simulation()) {\n+\t\t/* Enable the port */\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t} else {\n+\t\t/* Reset the port */\n+\t\tdata = oct_csr_read(BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Wait for reset to complete */\n+\t\tudelay(1);\n+\t\tdata = oct_csr_read(BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\t\tif (data & BIT(15)) {\n+\t\t\tnetdev_err(priv->netdev,\n+\t\t\t\t 
\"BGX%d:%d: SPU stuck in reset\\n\", priv->bgx, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Reset the SerDes lanes */\n+\t\tdata = oct_csr_read(BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(11);\n+\t\toct_csr_write(data, BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Disable packet reception */\n+\t\tdata = oct_csr_read(BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(12);\n+\t\toct_csr_write(data, BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Clear/disable interrupts */\n+\t\tdata = oct_csr_read(BGX_SMU_RX_INT(priv->node, priv->bgx, priv->index));\n+\t\toct_csr_write(data, BGX_SMU_RX_INT(priv->node, priv->bgx, priv->index));\n+\t\tdata = oct_csr_read(BGX_SMU_TX_INT(priv->node, priv->bgx, priv->index));\n+\t\toct_csr_write(data, BGX_SMU_TX_INT(priv->node, priv->bgx, priv->index));\n+\t\tdata = oct_csr_read(BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\t\toct_csr_write(data, BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\n+\t\tif ((priv->mode == PORT_MODE_10G_KR ||\n+\t\t priv->mode == PORT_MODE_40G_KR4) &&\n+\t\t !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\t\toct_csr_write(0, BGX_SPU_BR_PMD_LP_CUP(priv->node, priv->bgx, priv->index));\n+\t\t\toct_csr_write(0, BGX_SPU_BR_PMD_LD_CUP(priv->node, priv->bgx, priv->index));\n+\t\t\toct_csr_write(0, BGX_SPU_BR_PMD_LD_REP(priv->node, priv->bgx, priv->index));\n+\t\t\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t\tdata |= BIT(1);\n+\t\t\toct_csr_write(data, BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t}\n+\t}\n+\n+\tdata = oct_csr_read(BGX_SMU_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(3);\n+\toct_csr_write(data, BGX_SMU_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\n+\tif (!octeon_is_simulation()) {\n+\t\t/* Disable fec */\n+\t\tdata = oct_csr_read(BGX_SPU_FEC_CONTROL(priv->node, priv->bgx, 
priv->index));\n+\t\tdata &= ~BIT(0);\n+\t\toct_csr_write(data, BGX_SPU_FEC_CONTROL(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Disable/configure auto negotiation */\n+\t\tdata = oct_csr_read(BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tdata &= ~(BIT(13) | BIT(12));\n+\t\toct_csr_write(data, BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\n+\t\tdata = oct_csr_read(BGX_SPU_AN_ADV(priv->node, priv->bgx, priv->index));\n+\t\tdata &= ~(BIT(47) | BIT(26) | BIT(25) | BIT(22) | BIT(21) |\n+\t\t\t BIT(13) | BIT(12));\n+\t\tdata |= BIT(46);\n+\t\tif (priv->mode == PORT_MODE_40G_KR4)\n+\t\t\tdata |= BIT(24);\n+\t\telse\n+\t\t\tdata &= ~BIT(24);\n+\t\tif (priv->mode == PORT_MODE_10G_KR)\n+\t\t\tdata |= BIT(23);\n+\t\telse\n+\t\t\tdata &= ~BIT(23);\n+\t\toct_csr_write(data, BGX_SPU_AN_ADV(priv->node, priv->bgx, priv->index));\n+\n+\t\tdata = oct_csr_read(BGX_SPU_DBG_CONTROL(priv->node, priv->bgx));\n+\t\tdata |= BIT(29);\n+\t\tif (priv->mode == PORT_MODE_10G_KR ||\n+\t\t priv->mode == PORT_MODE_40G_KR4)\n+\t\t\tdata |= BIT(18);\n+\t\telse\n+\t\t\tdata &= ~BIT(18);\n+\t\toct_csr_write(data, BGX_SPU_DBG_CONTROL(priv->node, priv->bgx));\n+\n+\t\t/* Enable the port */\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && priv->index) {\n+\t\t\t/* BGX-22429 */\n+\t\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, 0));\n+\t\t\tdata |= BIT(15);\n+\t\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, 0));\n+\t\t}\n+\t}\n+\n+\tdata = oct_csr_read(BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\tdata &= ~BIT(11);\n+\toct_csr_write(data, BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_SMU_TX_CTL(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(0);\n+\tdata &= ~BIT(1);\n+\toct_csr_write(data, 
BGX_SMU_TX_CTL(priv->node, priv->bgx, priv->index));\n+\n+\tclock_mhz = octeon_get_io_clock_rate() / 1000000;\n+\tdata = oct_csr_read(BGX_SPU_DBG_CONTROL(priv->node, priv->bgx));\n+\tdata &= ~GENMASK_ULL(43, 32);\n+\tdata |= (clock_mhz - 1) << 32;\n+\toct_csr_write(data, BGX_SPU_DBG_CONTROL(priv->node, priv->bgx));\n+\n+\t/* Fifo in 16-byte words */\n+\ttx_fifo_size = bgx_get_tx_fifo_size(priv);\n+\ttx_fifo_size >>= 4;\n+\toct_csr_write(tx_fifo_size - 10, BGX_SMU_TX_THRESH(priv->node, priv->bgx, priv->index));\n+\n+\tif (priv->mode == PORT_MODE_RXAUI && priv->phy_np) {\n+\t\tdata = oct_csr_read(BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(10);\n+\t\toct_csr_write(data, BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\t/* Some PHYs take up to 250ms to stabilize */\n+\tif (!octeon_is_simulation())\n+\t\tusleep_range(250000, 300000);\n+\n+\treturn 0;\n+}\n+\n+/* Configure/initialize a bgx port. */\n+static int bgx_port_init(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\trc = 0;\n+\n+\t/* GSER-20956 */\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&\n+\t (priv->mode == PORT_MODE_10G_KR ||\n+\t priv->mode == PORT_MODE_XFI ||\n+\t priv->mode == PORT_MODE_40G_KR4 ||\n+\t priv->mode == PORT_MODE_XLAUI)) {\n+\t\t/* Disable link training */\n+\t\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tdata &= ~(1 << 1);\n+\t\toct_csr_write(data, BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\tbgx_common_init_pknd(priv);\n+\n+\tif (priv->mode == PORT_MODE_SGMII ||\n+\t priv->mode == PORT_MODE_RGMII)\n+\t\trc = bgx_xgmii_hardware_init(priv);\n+\telse\n+\t\trc = bgx_xaui_hardware_init(priv);\n+\n+\treturn rc;\n+}\n+\n+static int bgx_port_get_qlm_speed(struct bgx_port_priv\t*priv,\n+\t\t\t\t int\t\t\tqlm)\n+{\n+\tenum lane_mode\tlmode;\n+\tu64\t\tdata;\n+\n+\tdata = oct_csr_read(GSER_LANE_MODE(priv->node, qlm));\n+\tlmode = data & 0xf;\n+\n+\tswitch (lmode) 
{\n+\tcase R_25G_REFCLK100:\n+\t\treturn 2500;\n+\tcase R_5G_REFCLK100:\n+\t\treturn 5000;\n+\tcase R_8G_REFCLK100:\n+\t\treturn 8000;\n+\tcase R_125G_REFCLK15625_KX:\n+\t\treturn 1250;\n+\tcase R_3125G_REFCLK15625_XAUI:\n+\t\treturn 3125;\n+\tcase R_103125G_REFCLK15625_KR:\n+\t\treturn 10312;\n+\tcase R_125G_REFCLK15625_SGMII:\n+\t\treturn 1250;\n+\tcase R_5G_REFCLK15625_QSGMII:\n+\t\treturn 5000;\n+\tcase R_625G_REFCLK15625_RXAUI:\n+\t\treturn 6250;\n+\tcase R_25G_REFCLK125:\n+\t\treturn 2500;\n+\tcase R_5G_REFCLK125:\n+\t\treturn 5000;\n+\tcase R_8G_REFCLK125:\n+\t\treturn 8000;\n+\tdefault:\n+\t\treturn 0;\n+\t}\n+}\n+\n+static struct port_status bgx_port_get_sgmii_link(struct bgx_port_priv *priv)\n+{\n+\tstruct port_status\tstatus;\n+\tint\t\t\tspeed;\n+\n+\t/* The simulator always uses a 1Gbps full duplex port */\n+\tif (octeon_is_simulation()) {\n+\t\tstatus.link = 1;\n+\t\tstatus.duplex = DUPLEX_FULL;\n+\t\tstatus.speed = 1000;\n+\t} else {\n+\t\t/* Use the qlm speed */\n+\t\tspeed = bgx_port_get_qlm_speed(priv, priv->qlm);\n+\t\tstatus.link = 1;\n+\t\tstatus.duplex = DUPLEX_FULL;\n+\t\tstatus.speed = speed * 8 / 10;\n+\t}\n+\n+\treturn status;\n+}\n+\n+static int bgx_port_xgmii_set_link_up(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\ttimeout;\n+\n+\tif (!octeon_is_simulation()) {\n+\t\t/* PCS reset sequence */\n+\t\tdata = oct_csr_read(BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Wait for reset to complete */\n+\t\tudelay(1);\n+\t\tdata = oct_csr_read(BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tif (data & BIT(15)) {\n+\t\t\tnetdev_err(priv->netdev,\n+\t\t\t\t \"BGX%d:%d: PCS stuck in reset\\n\", priv->bgx, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* Autonegotiation */\n+\tif (priv->phy_np) {\n+\t\tdata = oct_csr_read(BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, 
priv->index));\n+\t\tdata |= BIT(9);\n+\t\tif (priv->mode != PORT_MODE_RGMII)\n+\t\t\tdata |= BIT(12);\n+\t\telse\n+\t\t\tdata &= ~BIT(12);\n+\t\tdata &= ~BIT(11);\n+\t\toct_csr_write(data, BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\t} else {\n+\t\tdata = oct_csr_read(BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(6);\n+\t\tdata &= ~(BIT(13) | BIT(12) | BIT(11));\n+\t\toct_csr_write(data, BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\tdata = oct_csr_read(BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\tdata &= ~(BIT(9) | BIT(8));\n+\tif (priv->mode_1000basex)\n+\t\tdata |= BIT(8);\n+\tif (priv->bgx_as_phy)\n+\t\tdata |= BIT(9);\n+\toct_csr_write(data, BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\n+\t/* Wait for autonegotiation to complete */\n+\tif (!octeon_is_simulation() && !priv->bgx_as_phy &&\n+\t priv->mode != PORT_MODE_RGMII) {\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(BGX_GMP_PCS_MR_STATUS(priv->node, priv->bgx, priv->index));\n+\t\t\tif (data & BIT(5))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tnetdev_err(priv->netdev, \"BGX%d:%d: AN timeout\\n\", priv->bgx, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void bgx_port_rgmii_set_link_down(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\trx_fifo_len;\n+\n+\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\tdata &= ~BIT(1);\n+\toct_csr_write(data, XCV_RESET(priv->node));\n+\t/* Is this read really needed? 
TODO */\n+\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\n+\t/* Wait for 2 MTUs */\n+\tmdelay(10);\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata &= ~BIT(14);\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\t/* Wait for the rx and tx fifos to drain */\n+\tdo {\n+\t\tdata = oct_csr_read(BGX_CMR_RX_FIFO_LEN(priv->node, priv->bgx, priv->index));\n+\t\trx_fifo_len = data & 0x1fff;\n+\t\tdata = oct_csr_read(BGX_CMR_TX_FIFO_LEN(priv->node, priv->bgx, priv->index));\n+\t} while (rx_fifo_len > 0 || !(data & BIT(13)));\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata &= ~BIT(13);\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\tdata &= ~BIT(3);\n+\toct_csr_write(data, XCV_RESET(priv->node));\n+\n+\tdata = oct_csr_read(BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(11);\n+\toct_csr_write(data, BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+}\n+\n+static void bgx_port_sgmii_set_link_down(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata &= ~(BIT(14) | BIT(13));\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata &= ~BIT(12);\n+\toct_csr_write(data, BGX_GMP_PCS_MR_CONTROL(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(11);\n+\toct_csr_write(data, BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\tdata = oct_csr_read(BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+}\n+\n+static int bgx_port_sgmii_set_link_speed(struct bgx_port_priv *priv, struct port_status 
status)\n+{\n+\tu64\tdata;\n+\tu64\tprtx;\n+\tu64\tmiscx;\n+\tint\ttimeout;\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata &= ~(BIT(14) | BIT(13));\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\ttimeout = 10000;\n+\tdo {\n+\t\tprtx = oct_csr_read(BGX_GMP_GMI_PRT_CFG(priv->node, priv->bgx, priv->index));\n+\t\tif (prtx & BIT(13) && prtx & BIT(12))\n+\t\t\tbreak;\n+\t\ttimeout--;\n+\t\tudelay(1);\n+\t} while (timeout);\n+\tif (!timeout) {\n+\t\tnetdev_err(priv->netdev, \"BGX%d:%d: GMP idle timeout\\n\", priv->bgx, priv->node);\n+\t\treturn -1;\n+\t}\n+\n+\tprtx = oct_csr_read(BGX_GMP_GMI_PRT_CFG(priv->node, priv->bgx, priv->index));\n+\tmiscx = oct_csr_read(BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\tif (status.link) {\n+\t\tmiscx &= ~BIT(11);\n+\t\tif (status.duplex == DUPLEX_FULL)\n+\t\t\tprtx |= BIT(2);\n+\t\telse\n+\t\t\tprtx &= ~BIT(2);\n+\t} else {\n+\t\tmiscx |= BIT(11);\n+\t}\n+\n+\tswitch (status.speed) {\n+\tcase 10:\n+\t\tprtx &= ~(BIT(3) | BIT(1));\n+\t\tprtx |= BIT(8);\n+\t\tmiscx &= ~GENMASK_ULL(6, 0);\n+\t\tmiscx |= 25;\n+\t\toct_csr_write(64, BGX_GMP_GMI_TX_SLOT(priv->node, priv->bgx, priv->index));\n+\t\toct_csr_write(0, BGX_GMP_GMI_TX_BURST(priv->node, priv->bgx, priv->index));\n+\t\tbreak;\n+\tcase 100:\n+\t\tprtx &= ~(BIT(8) | BIT(3) | BIT(1));\n+\t\tmiscx &= ~GENMASK_ULL(6, 0);\n+\t\tmiscx |= 5;\n+\t\toct_csr_write(64, BGX_GMP_GMI_TX_SLOT(priv->node, priv->bgx, priv->index));\n+\t\toct_csr_write(0, BGX_GMP_GMI_TX_BURST(priv->node, priv->bgx, priv->index));\n+\t\tbreak;\n+\tcase 1000:\n+\t\tprtx |= (BIT(3) | BIT(1));\n+\t\tprtx &= ~BIT(8);\n+\t\tmiscx &= ~GENMASK_ULL(6, 0);\n+\t\tmiscx |= 1;\n+\t\toct_csr_write(512, BGX_GMP_GMI_TX_SLOT(priv->node, priv->bgx, priv->index));\n+\t\tif (status.duplex == DUPLEX_FULL)\n+\t\t\toct_csr_write(0, BGX_GMP_GMI_TX_BURST(priv->node, priv->bgx, priv->index));\n+\t\telse\n+\t\t\toct_csr_write(8192, 
BGX_GMP_GMI_TX_BURST(priv->node, priv->bgx, priv->index));\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\toct_csr_write(miscx, BGX_GMP_PCS_MISC_CTL(priv->node, priv->bgx, priv->index));\n+\toct_csr_write(prtx, BGX_GMP_GMI_PRT_CFG(priv->node, priv->bgx, priv->index));\n+\t/* This read verifies the write completed */\n+\tprtx = oct_csr_read(BGX_GMP_GMI_PRT_CFG(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata |= (BIT(14) | BIT(13));\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\treturn 0;\n+}\n+\n+static int bgx_port_rgmii_set_link_speed(struct bgx_port_priv *priv, struct port_status status)\n+{\n+\tu64\tdata;\n+\tint\tspeed;\n+\tbool\tspeed_changed = false;\n+\tbool\tint_lpbk = false;\n+\tbool\tdo_credits;\n+\n+\tswitch (status.speed) {\n+\tcase 10:\n+\t\tspeed = 0;\n+\t\tbreak;\n+\tcase 100:\n+\t\tspeed = 1;\n+\t\tbreak;\n+\tcase 1000:\n+\tdefault:\n+\t\tspeed = 2;\n+\t\tbreak;\n+\t}\n+\n+\t/* Do credits if link came up */\n+\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\tdo_credits = status.link && !(data & BIT(63));\n+\n+\t/* Was there a speed change */\n+\tdata = oct_csr_read(XCV_CTL(priv->node));\n+\tif ((data & GENMASK_ULL(1, 0)) != speed)\n+\t\tspeed_changed = true;\n+\n+\t/* Clear clkrst when in internal loopback */\n+\tif (data & BIT(2)) {\n+\t\tint_lpbk = true;\n+\t\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\t\tdata &= ~BIT(15);\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\t}\n+\n+\t/* Link came up or there was a speed change */\n+\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\tif (status.link && (!(data & BIT(63)) || speed_changed)) {\n+\t\tdata |= BIT(63);\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_CTL(priv->node));\n+\t\tdata &= ~GENMASK_ULL(1, 0);\n+\t\tdata |= speed;\n+\t\toct_csr_write(data, XCV_CTL(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_DLL_CTL(priv->node));\n+\t\tdata 
|= BIT(23);\n+\t\tdata &= ~GENMASK_ULL(22, 16);\n+\t\tdata &= ~BIT(15);\n+\t\toct_csr_write(data, XCV_DLL_CTL(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_DLL_CTL(priv->node));\n+\t\tdata &= ~GENMASK_ULL(1, 0);\n+\t\toct_csr_write(data, XCV_DLL_CTL(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\t\tdata &= ~BIT(11);\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\n+\t\tusleep_range(10, 100);\n+\n+\t\tdata = oct_csr_read(XCV_COMP_CTL(priv->node));\n+\t\tdata &= ~BIT(63);\n+\t\toct_csr_write(data, XCV_COMP_CTL(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\t\tdata |= BIT(7);\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\t\tif (int_lpbk)\n+\t\t\tdata &= ~BIT(15);\n+\t\telse\n+\t\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\n+\t\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\t\tdata |= BIT(2) | BIT(0);\n+\t\toct_csr_write(data, XCV_RESET(priv->node));\n+\t}\n+\n+\tdata = oct_csr_read(XCV_RESET(priv->node));\n+\tif (status.link)\n+\t\tdata |= BIT(3) | BIT(1);\n+\telse\n+\t\tdata &= ~(BIT(3) | BIT(1));\n+\toct_csr_write(data, XCV_RESET(priv->node));\n+\n+\tif (!status.link) {\n+\t\tmdelay(10);\n+\t\toct_csr_write(0, XCV_RESET(priv->node));\n+\t}\n+\n+\t/* Grant pko tx credits */\n+\tif (do_credits) {\n+\t\tdata = oct_csr_read(XCV_BATCH_CRD_RET(priv->node));\n+\t\tdata |= BIT(0);\n+\t\toct_csr_write(data, XCV_BATCH_CRD_RET(priv->node));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int bgx_port_set_xgmii_link(struct bgx_port_priv *priv,\n+\t\t\t\t struct port_status status)\n+{\n+\tu64\tdata;\n+\tint\trc = 0;\n+\n+\tif (status.link) {\n+\t\t/* Link up */\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* BGX-22429 */\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && priv->index) {\n+\t\t\tdata = 
oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, 0));\n+\t\t\tdata |= BIT(15);\n+\t\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, 0));\n+\t\t}\n+\n+\t\trc = bgx_port_xgmii_set_link_up(priv);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t\trc = bgx_port_sgmii_set_link_speed(priv, status);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t\tif (priv->mode == PORT_MODE_RGMII)\n+\t\t\trc = bgx_port_rgmii_set_link_speed(priv, status);\n+\t} else {\n+\t\t/* Link down */\n+\t\tif (priv->mode == PORT_MODE_RGMII) {\n+\t\t\tbgx_port_rgmii_set_link_down(priv);\n+\t\t\trc = bgx_port_sgmii_set_link_speed(priv, status);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t\trc = bgx_port_rgmii_set_link_speed(priv, status);\n+\t\t} else {\n+\t\t\tbgx_port_sgmii_set_link_down(priv);\n+\t\t}\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static struct port_status bgx_port_get_xaui_link(struct bgx_port_priv *priv)\n+{\n+\tstruct port_status\tstatus;\n+\tint\t\t\tspeed;\n+\tint\t\t\tlanes;\n+\tu64\t\t\tdata;\n+\n+\tstatus.link = 0;\n+\tstatus.duplex = DUPLEX_HALF;\n+\tstatus.speed = 0;\n+\n+\t/* Get the link state */\n+\tdata = oct_csr_read(BGX_SMU_TX_CTL(priv->node, priv->bgx, priv->index));\n+\tdata &= GENMASK_ULL(5, 4);\n+\tif (!data) {\n+\t\tdata = oct_csr_read(BGX_SMU_RX_CTL(priv->node, priv->bgx, priv->index));\n+\t\tdata &= GENMASK_ULL(1, 0);\n+\t\tif (!data) {\n+\t\t\tdata = oct_csr_read(BGX_SPU_STATUS1(priv->node, priv->bgx, priv->index));\n+\t\t\tif (data & BIT(2))\n+\t\t\t\tstatus.link = 1;\n+\t\t}\n+\t}\n+\n+\tif (status.link) {\n+\t\t/* Always full duplex */\n+\t\tstatus.duplex = DUPLEX_FULL;\n+\n+\t\t/* Speed */\n+\t\tspeed = bgx_port_get_qlm_speed(priv, priv->qlm);\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t\tswitch ((data >> 8) & 7) {\n+\t\tdefault:\n+\t\tcase 1:\n+\t\t\tspeed = (speed * 8 + 5) / 10;\n+\t\t\tlanes = 4;\n+\t\t\tbreak;\n+\t\tcase 2:\n+\t\t\tspeed = (speed * 8 + 5) / 10;\n+\t\t\tlanes = 2;\n+\t\t\tbreak;\n+\t\tcase 3:\n+\t\t\tspeed = (speed * 
64 + 33) / 66;\n+\t\t\tlanes = 1;\n+\t\t\tbreak;\n+\t\tcase 4:\n+\t\t\tif (speed == 6250)\n+\t\t\t\tspeed = 6445;\n+\t\t\tspeed = (speed * 64 + 33) / 66;\n+\t\t\tlanes = 4;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tspeed *= lanes;\n+\t\tstatus.speed = speed;\n+\t}\n+\n+\treturn status;\n+}\n+\n+static int bgx_port_init_xaui_an(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\tdata = oct_csr_read(BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\t\t/* If autonegotiation is no good */\n+\t\tif (!(data & BIT(11))) {\n+\t\t\tdata = BIT(12) | BIT(11) | BIT(10);\n+\t\t\toct_csr_write(data, BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\n+\t\t\tdata = oct_csr_read(BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t\tdata |= BIT(9);\n+\t\t\toct_csr_write(data, BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\tdata = oct_csr_read(BGX_SPU_AN_STATUS(priv->node, priv->bgx, priv->index));\n+\t\t/* If autonegotiation hasn't completed */\n+\t\tif (!(data & BIT(5))) {\n+\t\t\tdata = oct_csr_read(BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t\tdata |= BIT(9);\n+\t\t\toct_csr_write(data, BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void bgx_port_xaui_start_training(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\n+\tdata = BIT(14) | BIT(13);\n+\toct_csr_write(data, BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\n+\t/* BGX-20968 */\n+\toct_csr_write(0, BGX_SPU_BR_PMD_LP_CUP(priv->node, priv->bgx, priv->index));\n+\toct_csr_write(0, BGX_SPU_BR_PMD_LD_CUP(priv->node, priv->bgx, priv->index));\n+\toct_csr_write(0, BGX_SPU_BR_PMD_LD_REP(priv->node, priv->bgx, priv->index));\n+\tdata = oct_csr_read(BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata &= ~BIT(12);\n+\toct_csr_write(data, BGX_SPU_AN_CONTROL(priv->node, priv->bgx, 
priv->index));\n+\tudelay(1);\n+\n+\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(1);\n+\toct_csr_write(data, BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\tudelay(1);\n+\n+\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(0);\n+\toct_csr_write(data, BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+}\n+\n+static int bgx_port_gser_27882(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tu64\taddr;\n+\tint\ttimeout;\n+\n+\ttimeout = 200;\n+\tdo {\n+\t\tdata = oct_csr_read(GSER_RX_EIE_DETSTS(priv->node, priv->qlm));\n+\t\tif (data & (1 << (priv->index + 8)))\n+\t\t\tbreak;\n+\t\ttimeout--;\n+\t\tudelay(1);\n+\t} while (timeout);\n+\tif (!timeout)\n+\t\treturn -1;\n+\n+\taddr = GSER_LANE_PCS_CTLIFC_0(priv->node, priv->qlm, priv->index);\n+\tdata = oct_csr_read(addr);\n+\tdata |= BIT(12);\n+\toct_csr_write(data, addr);\n+\n+\taddr = GSER_LANE_PCS_CTLIFC_2(priv->node, priv->qlm, priv->index);\n+\tdata = oct_csr_read(addr);\n+\tdata |= BIT(7);\n+\toct_csr_write(data, addr);\n+\n+\tdata = oct_csr_read(addr);\n+\tdata |= BIT(15);\n+\toct_csr_write(data, addr);\n+\n+\tdata = oct_csr_read(addr);\n+\tdata &= ~BIT(7);\n+\toct_csr_write(data, addr);\n+\n+\tdata = oct_csr_read(addr);\n+\tdata |= BIT(15);\n+\toct_csr_write(data, addr);\n+\n+\treturn 0;\n+}\n+\n+static void bgx_port_xaui_restart_training(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\n+\tdata = BIT(14) | BIT(13);\n+\toct_csr_write(data, BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\tusleep_range(1700, 2000);\n+\n+\t/* BGX-20968 */\n+\toct_csr_write(0, BGX_SPU_BR_PMD_LP_CUP(priv->node, priv->bgx, priv->index));\n+\toct_csr_write(0, BGX_SPU_BR_PMD_LD_CUP(priv->node, priv->bgx, priv->index));\n+\toct_csr_write(0, BGX_SPU_BR_PMD_LD_REP(priv->node, priv->bgx, priv->index));\n+\n+\t/* Restart training */\n+\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, 
priv->index));\n+\tdata |= BIT(0);\n+\toct_csr_write(data, BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+}\n+\n+static int bgx_port_get_max_qlm_lanes(int qlm)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX))\n+\t\treturn (qlm < 4) ? 4 : 2;\n+\telse if (OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 2;\n+\treturn 4;\n+}\n+\n+static int bgx_port_qlm_rx_equalization(struct bgx_port_priv *priv, int qlm, int lane)\n+{\n+\tu64\tdata;\n+\tu64\taddr;\n+\tu64\tlmode;\n+\tint\tmax_lanes = bgx_port_get_max_qlm_lanes(qlm);\n+\tint\tlane_mask = lane == -1 ? ((1 << max_lanes) - 1) : (1 << lane);\n+\tint\ttimeout;\n+\tint\ti;\n+\tint\trc = 0;\n+\n+\t/* Nothing to do for qlms in reset */\n+\tdata = oct_csr_read(GSER_PHY_CTL(priv->node, qlm));\n+\tif (data & (BIT(0) | BIT(1)))\n+\t\treturn -1;\n+\n+\tfor (i = 0; i < max_lanes; i++) {\n+\t\tif (!(i & lane_mask))\n+\t\t\tcontinue;\n+\n+\t\taddr = GSER_LANE_LBERT_CFG(priv->node, qlm, i);\n+\t\tdata = oct_csr_read(addr);\n+\t\t/* Rx equalization can't be completed while pattern matcher is\n+\t\t * enabled because it causes errors.\n+\t\t */\n+\t\tif (data & BIT(6))\n+\t\t\treturn -1;\n+\t}\n+\n+\tlmode = oct_csr_read(GSER_LANE_MODE(priv->node, qlm));\n+\tlmode &= 0xf;\n+\taddr = GSER_LANE_P_MODE_1(priv->node, qlm, lmode);\n+\tdata = oct_csr_read(addr);\n+\t/* Don't complete rx equalization if in VMA manual mode */\n+\tif (data & BIT(14))\n+\t\treturn 0;\n+\n+\t/* Apply rx equalization for speed > 6250 */\n+\tif (bgx_port_get_qlm_speed(priv, qlm) < 6250)\n+\t\treturn 0;\n+\n+\t/* Wait until rx data is valid (CDRLOCK) */\n+\ttimeout = 500;\n+\taddr = GSER_RX_EIE_DETSTS(priv->node, qlm);\n+\tdo {\n+\t\tdata = oct_csr_read(addr);\n+\t\tdata >>= 8;\n+\t\tdata &= lane_mask;\n+\t\tif (data == lane_mask)\n+\t\t\tbreak;\n+\t\ttimeout--;\n+\t\tudelay(1);\n+\t} while (timeout);\n+\tif (!timeout) {\n+\t\tpr_debug(\"QLM%d:%d: CDRLOCK timeout\\n\", qlm, priv->node);\n+\t\treturn -1;\n+\t}\n+\n+\tbgx_port_gser_20075(priv, qlm, 
lane);\n+\n+\tfor (i = 0; i < max_lanes; i++) {\n+\t\tif (!(i & lane_mask))\n+\t\t\tcontinue;\n+\t\t/* Skip lane 3 on 78p1.x due to gser-20075. Handled above */\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && i == 3)\n+\t\t\tcontinue;\n+\n+\t\t/* Enable software control */\n+\t\taddr = GSER_BR_RX_CTL(priv->node, qlm, i);\n+\t\tdata = oct_csr_read(addr);\n+\t\tdata |= BIT(2);\n+\t\toct_csr_write(data, addr);\n+\n+\t\t/* Clear the completion flag */\n+\t\taddr = GSER_BR_RX_EER(priv->node, qlm, i);\n+\t\tdata = oct_csr_read(addr);\n+\t\tdata &= ~BIT(14);\n+\t\tdata |= BIT(15);\n+\t\toct_csr_write(data, addr);\n+\t}\n+\n+\t/* Wait for rx equalization to complete */\n+\tfor (i = 0; i < max_lanes; i++) {\n+\t\tif (!(i & lane_mask))\n+\t\t\tcontinue;\n+\n+\t\ttimeout = 250000;\n+\t\taddr = GSER_BR_RX_EER(priv->node, qlm, i);\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(addr);\n+\t\t\tif (data & BIT(14))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_debug(\"QLM%d:%d: RXT_ESV timeout\\n\",\n+\t\t\t\t qlm, priv->node);\n+\t\t\trc = -1;\n+\t\t}\n+\n+\t\t/* Switch back to hardware control */\n+\t\taddr = GSER_BR_RX_CTL(priv->node, qlm, i);\n+\t\tdata = oct_csr_read(addr);\n+\t\tdata &= ~BIT(2);\n+\t\toct_csr_write(data, addr);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static int bgx_port_xaui_equalization(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\tlane;\n+\n+\t/* Nothing to do for loopback mode */\n+\tdata = oct_csr_read(BGX_SPU_CONTROL1(priv->node, priv->bgx,\n+\t\t\t\t\t priv->index));\n+\tif (data & BIT(14))\n+\t\treturn 0;\n+\n+\tif (priv->mode == PORT_MODE_XAUI || priv->mode == PORT_MODE_XLAUI) {\n+\t\tif (bgx_port_qlm_rx_equalization(priv, priv->qlm, -1))\n+\t\t\treturn -1;\n+\n+\t\t/* BGX2 of 73xx uses 2 dlms */\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) && priv->bgx == 2) {\n+\t\t\tif (bgx_port_qlm_rx_equalization(priv, priv->qlm + 1, -1))\n+\t\t\t\treturn -1;\n+\t\t}\n+\t} else if (priv->mode == 
PORT_MODE_RXAUI) {\n+\t\t/* Rxaui always uses 2 lanes */\n+\t\tif (bgx_port_qlm_rx_equalization(priv, priv->qlm, -1))\n+\t\t\treturn -1;\n+\t} else if (priv->mode == PORT_MODE_XFI) {\n+\t\tlane = priv->index;\n+\t\tif ((OCTEON_IS_MODEL(OCTEON_CN73XX) && priv->qlm == 6) ||\n+\t\t (OCTEON_IS_MODEL(OCTEON_CNF75XX) && priv->qlm == 5))\n+\t\t\tlane -= 2;\n+\n+\t\tif (bgx_port_qlm_rx_equalization(priv, priv->qlm, lane))\n+\t\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int bgx_port_init_xaui_link(struct bgx_port_priv *priv)\n+{\n+\tu64\tdata;\n+\tint\tuse_training = 0;\n+\tint\tuse_ber = 0;\n+\tint\ttimeout;\n+\tint\trc = 0;\n+\n+\tif (priv->mode == PORT_MODE_10G_KR || priv->mode == PORT_MODE_40G_KR4)\n+\t\tuse_training = 1;\n+\n+\tif (!octeon_is_simulation() &&\n+\t (priv->mode == PORT_MODE_XFI || priv->mode == PORT_MODE_XLAUI ||\n+\t priv->mode == PORT_MODE_10G_KR || priv->mode == PORT_MODE_40G_KR4))\n+\t\tuse_ber = 1;\n+\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata &= ~(BIT(14) | BIT(13));\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\tdata = oct_csr_read(BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(12);\n+\toct_csr_write(data, BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\n+\tif (!octeon_is_simulation()) {\n+\t\tdata = oct_csr_read(BGX_SPU_AN_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t/* Restart autonegotiation */\n+\t\tif (data & BIT(12)) {\n+\t\t\trc = bgx_port_init_xaui_an(priv);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t}\n+\n+\t\tif (use_training) {\n+\t\t\tdata = oct_csr_read(BGX_SPU_BR_PMD_CONTROL(priv->node, priv->bgx, priv->index));\n+\t\t\t/* Check if training is enabled */\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&\n+\t\t\t !(data & BIT(1))) {\n+\t\t\t\tbgx_port_xaui_start_training(priv);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) ||\n+\t\t\t 
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||\n+\t\t\t OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\t\tbgx_port_gser_27882(priv);\n+\n+\t\t\tdata = oct_csr_read(BGX_SPU_INT(priv->node, priv->bgx, priv->index));\n+\n+\t\t\t/* Restart training if it failed */\n+\t\t\tif ((data & BIT(14)) &&\n+\t\t\t !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\t\t\tbgx_port_xaui_restart_training(priv);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+\t\t\tif (!(data & BIT(13))) {\n+\t\t\t\tpr_debug(\"Waiting for link training\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tbgx_port_xaui_equalization(priv);\n+\t\t}\n+\n+\t\t/* Wait until the reset is complete */\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(BGX_SPU_CONTROL1(priv->node, priv->bgx, priv->index));\n+\t\t\tif (!(data & BIT(15)))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: Reset timeout\\n\", priv->bgx,\n+\t\t\t\t priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tif (use_ber) {\n+\t\t\ttimeout = 10000;\n+\t\t\tdo {\n+\t\t\t\tdata =\n+\t\t\t\toct_csr_read(BGX_SPU_BR_STATUS1(priv->node, priv->bgx, priv->index));\n+\t\t\t\tif (data & BIT(0))\n+\t\t\t\t\tbreak;\n+\t\t\t\ttimeout--;\n+\t\t\t\tudelay(1);\n+\t\t\t} while (timeout);\n+\t\t\tif (!timeout) {\n+\t\t\t\tpr_debug(\"BGX%d:%d:%d: BLK_LOCK timeout\\n\",\n+\t\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\ttimeout = 10000;\n+\t\t\tdo {\n+\t\t\t\tdata =\n+\t\t\t\toct_csr_read(BGX_SPU_BX_STATUS(priv->node, priv->bgx, priv->index));\n+\t\t\t\tif (data & BIT(12))\n+\t\t\t\t\tbreak;\n+\t\t\t\ttimeout--;\n+\t\t\t\tudelay(1);\n+\t\t\t} while (timeout);\n+\t\t\tif (!timeout) {\n+\t\t\t\tpr_debug(\"BGX%d:%d:%d: Lanes align timeout\\n\",\n+\t\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (use_ber) {\n+\t\t\tdata = oct_csr_read(BGX_SPU_BR_STATUS2(priv->node, priv->bgx, 
priv->index));\n+\t\t\tdata |= BIT(15);\n+\t\t\toct_csr_write(data, BGX_SPU_BR_STATUS2(priv->node, priv->bgx, priv->index));\n+\t\t}\n+\n+\t\tdata = oct_csr_read(BGX_SPU_STATUS2(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(10);\n+\t\toct_csr_write(data, BGX_SPU_STATUS2(priv->node, priv->bgx, priv->index));\n+\n+\t\tdata = oct_csr_read(BGX_SPU_STATUS2(priv->node, priv->bgx, priv->index));\n+\t\tif (data & BIT(10)) {\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&\n+\t\t\t use_training)\n+\t\t\t\tbgx_port_xaui_restart_training(priv);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Wait for mac rx to be ready */\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(BGX_SMU_RX_CTL(priv->node, priv->bgx, priv->index));\n+\t\t\tdata &= GENMASK_ULL(1, 0);\n+\t\t\tif (!data)\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: mac ready timeout\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Wait for bgx rx to be idle */\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(BGX_SMU_CTRL(priv->node, priv->bgx, priv->index));\n+\t\t\tif (data & BIT(0))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: rx idle timeout\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Wait for gmx tx to be idle */\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(BGX_SMU_CTRL(priv->node, priv->bgx, priv->index));\n+\t\t\tif (data & BIT(1))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: tx idle timeout\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Check rcvflt is still be 0 */\n+\t\tdata = oct_csr_read(BGX_SPU_STATUS2(priv->node, priv->bgx, priv->index));\n+\t\tif (data & 
BIT(10)) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: receive fault\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Receive link is latching low. Force it high and verify it */\n+\t\tdata = oct_csr_read(BGX_SPU_STATUS1(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(2);\n+\t\toct_csr_write(data, BGX_SPU_STATUS1(priv->node, priv->bgx, priv->index));\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(BGX_SPU_STATUS1(priv->node, priv->bgx, priv->index));\n+\t\t\tif (data & BIT(2))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: rx link down\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (use_ber) {\n+\t\t/* Read error counters to clear */\n+\t\tdata = oct_csr_read(BGX_SPU_BR_BIP_ERR_CNT(priv->node, priv->bgx, priv->index));\n+\t\tdata = oct_csr_read(BGX_SPU_BR_STATUS2(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Verify latch lock is set */\n+\t\tif (!(data & BIT(15))) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: latch lock lost\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* LATCHED_BER is cleared by writing 1 to it */\n+\t\tif (data & BIT(14))\n+\t\t\toct_csr_write(data, BGX_SPU_BR_STATUS2(priv->node, priv->bgx, priv->index));\n+\n+\t\tusleep_range(1500, 2000);\n+\t\tdata = oct_csr_read(BGX_SPU_BR_STATUS2(priv->node, priv->bgx, priv->index));\n+\t\tif (data & BIT(14)) {\n+\t\t\tpr_debug(\"BGX%d:%d:%d: BER test failed\\n\",\n+\t\t\t\t priv->bgx, priv->index, priv->node);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* Enable packet transmit and receive */\n+\tdata = oct_csr_read(BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata &= ~BIT(12);\n+\toct_csr_write(data, BGX_SPU_MISC_CONTROL(priv->node, priv->bgx, priv->index));\n+\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\tdata |= BIT(14) | 
BIT(13);\n+\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\n+\treturn 0;\n+}\n+\n+static int bgx_port_set_xaui_link(struct bgx_port_priv *priv,\n+\t\t\t\t struct port_status status)\n+{\n+\tu64\tdata;\n+\tbool\tsmu_tx_ok = false;\n+\tbool\tsmu_rx_ok = false;\n+\tbool\tspu_link_ok = false;\n+\tint\trc = 0;\n+\n+\t/* Initialize hardware if link is up but hardware is not happy */\n+\tif (status.link) {\n+\t\tdata = oct_csr_read(BGX_SMU_TX_CTL(priv->node, priv->bgx, priv->index));\n+\t\tdata &= GENMASK_ULL(5, 4);\n+\t\tsmu_tx_ok = data == 0;\n+\n+\t\tdata = oct_csr_read(BGX_SMU_RX_CTL(priv->node, priv->bgx, priv->index));\n+\t\tdata &= GENMASK_ULL(1, 0);\n+\t\tsmu_rx_ok = data == 0;\n+\n+\t\tdata = oct_csr_read(BGX_SPU_STATUS1(priv->node, priv->bgx, priv->index));\n+\t\tdata &= BIT(2);\n+\t\tspu_link_ok = data == BIT(2);\n+\n+\t\tif (!smu_tx_ok || !smu_rx_ok || !spu_link_ok)\n+\t\t\trc = bgx_port_init_xaui_link(priv);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static struct bgx_port_priv *bgx_port_netdev2priv(struct net_device *netdev)\n+{\n+\tstruct bgx_port_netdev_priv *nd_priv = netdev_priv(netdev);\n+\n+\treturn nd_priv->bgx_priv;\n+}\n+\n+void bgx_port_set_netdev(struct device *dev, struct net_device *netdev)\n+{\n+\tstruct bgx_port_priv *priv = dev_get_drvdata(dev);\n+\n+\tif (netdev) {\n+\t\tstruct bgx_port_netdev_priv *nd_priv = netdev_priv(netdev);\n+\n+\t\tnd_priv->bgx_priv = priv;\n+\t}\n+\n+\tpriv->netdev = netdev;\n+}\n+EXPORT_SYMBOL(bgx_port_set_netdev);\n+\n+int bgx_port_ethtool_get_link_ksettings(struct net_device *netdev,\n+\t\t\t\t\tstruct ethtool_link_ksettings *cmd)\n+{\n+\tstruct bgx_port_priv\t*priv = bgx_port_netdev2priv(netdev);\n+\n+\tif (priv->phydev) {\n+\t\tphy_ethtool_ksettings_get(priv->phydev, cmd);\n+\t\treturn 0;\n+\t}\n+\treturn -EINVAL;\n+}\n+EXPORT_SYMBOL(bgx_port_ethtool_get_link_ksettings);\n+\n+int bgx_port_ethtool_set_settings(struct net_device\t*netdev,\n+\t\t\t\t struct ethtool_cmd\t*cmd)\n+{\n+\tstruct 
bgx_port_priv *p = bgx_port_netdev2priv(netdev);\n+\n+\tif (!capable(CAP_NET_ADMIN))\n+\t\treturn -EPERM;\n+\n+\tif (p->phydev)\n+\t\treturn phy_ethtool_sset(p->phydev, cmd);\n+\n+\treturn -EOPNOTSUPP;\n+}\n+EXPORT_SYMBOL(bgx_port_ethtool_set_settings);\n+\n+int bgx_port_ethtool_nway_reset(struct net_device *netdev)\n+{\n+\tstruct bgx_port_priv *p = bgx_port_netdev2priv(netdev);\n+\n+\tif (!capable(CAP_NET_ADMIN))\n+\t\treturn -EPERM;\n+\n+\tif (p->phydev)\n+\t\treturn phy_start_aneg(p->phydev);\n+\n+\treturn -EOPNOTSUPP;\n+}\n+EXPORT_SYMBOL(bgx_port_ethtool_nway_reset);\n+\n+const u8 *bgx_port_get_mac(struct net_device *netdev)\n+{\n+\tstruct bgx_port_priv *priv = bgx_port_netdev2priv(netdev);\n+\n+\treturn priv->mac_addr;\n+}\n+EXPORT_SYMBOL(bgx_port_get_mac);\n+\n+int bgx_port_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)\n+{\n+\tstruct bgx_port_priv *p = bgx_port_netdev2priv(netdev);\n+\n+\tif (p->phydev)\n+\t\treturn phy_mii_ioctl(p->phydev, ifr, cmd);\n+\treturn -EOPNOTSUPP;\n+}\n+EXPORT_SYMBOL(bgx_port_do_ioctl);\n+\n+static void bgx_port_write_cam(struct bgx_port_priv\t*priv,\n+\t\t\t int\t\t\tcam,\n+\t\t\t const u8\t\t\t*mac)\n+{\n+\tu64\tm = 0;\n+\tint\ti;\n+\n+\tif (mac) {\n+\t\tfor (i = 0; i < 6; i++)\n+\t\t\tm |= (((u64)mac[i]) << ((5 - i) * 8));\n+\t\tm |= BIT(48);\n+\t}\n+\n+\tm |= (u64)priv->index << 52;\n+\toct_csr_write(m, BGX_CMR_RX_ADRX_CAM(priv->node, priv->bgx, priv->index * 8 + cam));\n+}\n+\n+/* Set MAC address for the net_device that is attached. 
*/\n+void bgx_port_set_rx_filtering(struct net_device *netdev)\n+{\n+\tu64\tdata;\n+\tstruct bgx_port_priv *priv = bgx_port_netdev2priv(netdev);\n+\tint available_cam_entries, current_cam_entry;\n+\tstruct netdev_hw_addr *ha;\n+\n+\tavailable_cam_entries = 8;\n+\tdata = 0;\n+\tdata |= BIT(0); /* Accept all Broadcast*/\n+\n+\tif ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {\n+\t\tdata &= ~BIT(3); /* Reject CAM match */\n+\t\tavailable_cam_entries = 0;\n+\t} else {\n+\t\t/* One CAM entry for the primary address, leaves seven\n+\t\t * for the secondary addresses.\n+\t\t */\n+\t\tdata |= BIT(3); /* Accept CAM match */\n+\t\tavailable_cam_entries = 7 - netdev->uc.count;\n+\t}\n+\n+\tif (netdev->flags & IFF_PROMISC) {\n+\t\tdata |= 1 << 1; /* Accept all Multicast */\n+\t} else {\n+\t\tif (netdev->flags & IFF_MULTICAST) {\n+\t\t\tif ((netdev->flags & IFF_ALLMULTI) ||\n+\t\t\t netdev_mc_count(netdev) > available_cam_entries)\n+\t\t\t\tdata |= 1 << 1; /* Accept all Multicast */\n+\t\t\telse\n+\t\t\t\tdata |= 2 << 1; /* Accept all Mcast via CAM */\n+\t\t}\n+\t}\n+\tcurrent_cam_entry = 0;\n+\tif (data & BIT(3)) {\n+\t\tbgx_port_write_cam(priv, current_cam_entry, netdev->dev_addr);\n+\t\tcurrent_cam_entry++;\n+\t\tnetdev_for_each_uc_addr(ha, netdev) {\n+\t\t\tbgx_port_write_cam(priv, current_cam_entry, ha->addr);\n+\t\t\tcurrent_cam_entry++;\n+\t\t}\n+\t}\n+\tif (((data & GENMASK_ULL(2, 1)) >> 1) == 2) {\n+\t\t/* Accept all Multicast via CAM */\n+\t\tnetdev_for_each_mc_addr(ha, netdev) {\n+\t\t\tbgx_port_write_cam(priv, current_cam_entry, ha->addr);\n+\t\t\tcurrent_cam_entry++;\n+\t\t}\n+\t}\n+\twhile (current_cam_entry < 8) {\n+\t\tbgx_port_write_cam(priv, current_cam_entry, NULL);\n+\t\tcurrent_cam_entry++;\n+\t}\n+\toct_csr_write(data, BGX_CMR_RX_ADR_CTL(priv->node, priv->bgx,\n+\t\t\t\t\t priv->index));\n+}\n+EXPORT_SYMBOL(bgx_port_set_rx_filtering);\n+\n+static void bgx_port_adjust_link(struct net_device *netdev)\n+{\n+\tstruct bgx_port_priv\t*priv = 
bgx_port_netdev2priv(netdev);\n+\tint\t\t\tlink_changed = 0;\n+\tunsigned int\t\tlink;\n+\tunsigned int\t\tspeed;\n+\tunsigned int\t\tduplex;\n+\n+\tmutex_lock(&priv->lock);\n+\n+\tif (!priv->phydev->link && priv->last_status.link)\n+\t\tlink_changed = -1;\n+\n+\tif (priv->phydev->link &&\n+\t (priv->last_status.link != priv->phydev->link ||\n+\t priv->last_status.duplex != priv->phydev->duplex ||\n+\t priv->last_status.speed != priv->phydev->speed))\n+\t\tlink_changed = 1;\n+\n+\tlink = priv->phydev->link;\n+\tpriv->last_status.link = priv->phydev->link;\n+\n+\tspeed = priv->phydev->speed;\n+\tpriv->last_status.speed = priv->phydev->speed;\n+\n+\tduplex = priv->phydev->duplex;\n+\tpriv->last_status.duplex = priv->phydev->duplex;\n+\n+\tmutex_unlock(&priv->lock);\n+\n+\tif (link_changed != 0) {\n+\t\tstruct port_status status;\n+\n+\t\tif (link_changed > 0) {\n+\t\t\tnetdev_info(netdev, \"Link is up - %d/%s\\n\",\n+\t\t\t\t priv->phydev->speed,\n+\t\t\t\t priv->phydev->duplex == DUPLEX_FULL ?\n+\t\t\t\t \"Full\" : \"Half\");\n+\t\t} else {\n+\t\t\tnetdev_info(netdev, \"Link is down\\n\");\n+\t\t}\n+\t\tstatus.link = link ? 1 : 0;\n+\t\tstatus.duplex = duplex;\n+\t\tstatus.speed = speed;\n+\t\tif (!link) {\n+\t\t\tnetif_carrier_off(netdev);\n+\t\t\t /* Let TX drain. FIXME check that it is drained. 
*/\n+\t\t\tmdelay(50);\n+\t\t}\n+\t\tpriv->set_link(priv, status);\n+\t\tif (link)\n+\t\t\tnetif_carrier_on(netdev);\n+\t}\n+}\n+\n+static void bgx_port_check_state(struct work_struct *work)\n+{\n+\tstruct bgx_port_priv\t*priv;\n+\tstruct port_status\tstatus;\n+\n+\tpriv = container_of(work, struct bgx_port_priv, dwork.work);\n+\n+\tstatus = priv->get_link(priv);\n+\n+\tif (!status.link &&\n+\t priv->mode != PORT_MODE_SGMII && priv->mode != PORT_MODE_RGMII)\n+\t\tbgx_port_init_xaui_link(priv);\n+\n+\tif (priv->last_status.link != status.link) {\n+\t\tpriv->last_status.link = status.link;\n+\t\tif (status.link)\n+\t\t\tnetdev_info(priv->netdev, \"Link is up - %d/%s\\n\",\n+\t\t\t\t status.speed,\n+\t\t\t\t status.duplex == DUPLEX_FULL ? \"Full\" : \"Half\");\n+\t\telse\n+\t\t\tnetdev_info(priv->netdev, \"Link is down\\n\");\n+\t}\n+\n+\tmutex_lock(&priv->lock);\n+\tif (priv->work_queued)\n+\t\tqueue_delayed_work(check_state_wq, &priv->dwork, HZ);\n+\tmutex_unlock(&priv->lock);\n+}\n+\n+int bgx_port_enable(struct net_device *netdev)\n+{\n+\tstruct bgx_port_priv\t*priv = bgx_port_netdev2priv(netdev);\n+\tu64\t\t\tdata;\n+\tstruct port_status\tstatus;\n+\tbool\t\t\tdont_use_phy;\n+\n+\tif (priv->mode == PORT_MODE_SGMII || priv->mode == PORT_MODE_RGMII) {\n+\t\t/* 1G */\n+\t\tdata = oct_csr_read(BGX_GMP_GMI_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(2) | BIT(1);\n+\t\toct_csr_write(data, BGX_GMP_GMI_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Packets are padded (without FCS) to MIN_SIZE + 1 in SGMII */\n+\t\tdata = 60 - 1;\n+\t\toct_csr_write(data, BGX_GMP_GMI_TX_MIN_PKT(priv->node, priv->bgx, priv->index));\n+\t} else {\n+\t\t/* 10G or higher */\n+\t\tdata = oct_csr_read(BGX_SMU_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(2) | BIT(1);\n+\t\toct_csr_write(data, BGX_SMU_TX_APPEND(priv->node, priv->bgx, priv->index));\n+\n+\t\t/* Packets are padded(with FCS) to MIN_SIZE in non-SGMII */\n+\t\tdata = 60 + 
4;\n+\t\toct_csr_write(data, BGX_SMU_TX_MIN_PKT(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\tswitch (priv->mode) {\n+\tcase PORT_MODE_XLAUI:\n+\tcase PORT_MODE_XFI:\n+\tcase PORT_MODE_10G_KR:\n+\tcase PORT_MODE_40G_KR4:\n+\t\tdont_use_phy = true;\n+\t\tbreak;\n+\tdefault:\n+\t\tdont_use_phy = false;\n+\t\tbreak;\n+\t}\n+\n+\tif (!priv->phy_np || dont_use_phy) {\n+\t\tstatus = priv->get_link(priv);\n+\t\tpriv->set_link(priv, status);\n+\t\tnetif_carrier_on(netdev);\n+\n+\t\tmutex_lock(&check_state_wq_mutex);\n+\t\tif (!check_state_wq) {\n+\t\t\tcheck_state_wq =\n+\t\t\t\talloc_workqueue(\"check_state_wq\", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);\n+\t\t}\n+\t\tmutex_unlock(&check_state_wq_mutex);\n+\t\tif (!check_state_wq)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tmutex_lock(&priv->lock);\n+\t\tINIT_DELAYED_WORK(&priv->dwork, bgx_port_check_state);\n+\t\tqueue_delayed_work(check_state_wq, &priv->dwork, 0);\n+\t\tpriv->work_queued = true;\n+\t\tmutex_unlock(&priv->lock);\n+\n+\t\tnetdev_info(priv->netdev, \"Link is not ready\\n\");\n+\n+\t} else {\n+\t\tpriv->phydev = of_phy_connect(netdev, priv->phy_np,\n+\t\t\t\t\t bgx_port_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);\n+\t\tif (!priv->phydev)\n+\t\t\treturn -ENODEV;\n+\n+\t\tnetif_carrier_off(netdev);\n+\n+\t\tif (priv->phydev)\n+\t\t\tphy_start_aneg(priv->phydev);\n+\t}\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(bgx_port_enable);\n+\n+int bgx_port_disable(struct net_device *netdev)\n+{\n+\tstruct bgx_port_priv\t*priv = bgx_port_netdev2priv(netdev);\n+\tstruct port_status\tstatus;\n+\n+\tif (priv->phydev) {\n+\t\tphy_stop(priv->phydev);\n+\t\tphy_disconnect(priv->phydev);\n+\t}\n+\tpriv->phydev = NULL;\n+\n+\tnetif_carrier_off(netdev);\n+\tmemset(&status, 0, sizeof(status));\n+\tpriv->last_status.link = 0;\n+\tpriv->set_link(priv, status);\n+\n+\tmutex_lock(&priv->lock);\n+\tif (priv->work_queued) {\n+\t\tcancel_delayed_work_sync(&priv->dwork);\n+\t\tpriv->work_queued = false;\n+\t}\n+\tmutex_unlock(&priv->lock);\n+\n+\treturn 
0;\n+}\n+EXPORT_SYMBOL(bgx_port_disable);\n+\n+int bgx_port_change_mtu(struct net_device *netdev, int new_mtu)\n+{\n+\tstruct bgx_port_priv *priv = bgx_port_netdev2priv(netdev);\n+\tint max_frame;\n+\n+\tif (new_mtu < 60 || new_mtu > 65392) {\n+\t\tnetdev_warn(netdev, \"Maximum MTU supported is 65392\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tnetdev->mtu = new_mtu;\n+\n+\tmax_frame = round_up(new_mtu + ETH_HLEN + ETH_FCS_LEN, 8);\n+\n+\tif (priv->mode == PORT_MODE_SGMII || priv->mode == PORT_MODE_RGMII) {\n+\t\t/* 1G */\n+\t\toct_csr_write(max_frame, BGX_GMP_GMI_RX_JABBER(priv->node, priv->bgx, priv->index));\n+\t} else {\n+\t\t/* 10G or higher */\n+\t\toct_csr_write(max_frame, BGX_SMU_RX_JABBER(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(bgx_port_change_mtu);\n+\n+void bgx_port_mix_assert_reset(struct net_device *netdev, int mix, bool v)\n+{\n+\tstruct bgx_port_priv *priv = bgx_port_netdev2priv(netdev);\n+\tu64 mask = 1ull << (3 + (mix & 1));\n+\tu64 data;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && v) {\n+\t\t/* Need to disable the mix before resetting the bgx-mix\n+\t\t * interface as not doing so confuses the other already up\n+\t\t * lmacs.\n+\t\t */\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t\tdata &= ~BIT(11);\n+\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t}\n+\n+\tdata = oct_csr_read(BGX_CMR_GLOBAL_CONFIG(priv->node, priv->bgx));\n+\tif (v)\n+\t\tdata |= mask;\n+\telse\n+\t\tdata &= ~mask;\n+\toct_csr_write(data, BGX_CMR_GLOBAL_CONFIG(priv->node, priv->bgx));\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && !v) {\n+\t\tdata = oct_csr_read(BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t\tdata |= BIT(11);\n+\t\toct_csr_write(data, BGX_CMR_CONFIG(priv->node, priv->bgx, priv->index));\n+\t}\n+}\n+EXPORT_SYMBOL(bgx_port_mix_assert_reset);\n+\n+static int bgx_port_probe(struct platform_device *pdev)\n+{\n+\tu64 addr;\n+\tconst u8 
*mac;\n+\tconst __be32 *reg;\n+\tu32 index;\n+\tint rc;\n+\tstruct bgx_port_priv *priv;\n+\tint numa_node;\n+\n+\treg = of_get_property(pdev->dev.parent->of_node, \"reg\", NULL);\n+\taddr = of_translate_address(pdev->dev.parent->of_node, reg);\n+\tmac = of_get_mac_address(pdev->dev.of_node);\n+\n+\tnuma_node = (addr >> 36) & 0x7;\n+\n+\trc = of_property_read_u32(pdev->dev.of_node, \"reg\", &index);\n+\tif (rc)\n+\t\treturn -ENODEV;\n+\tpriv = kzalloc_node(sizeof(*priv), GFP_KERNEL, numa_node);\n+\tif (!priv)\n+\t\treturn -ENOMEM;\n+\tpriv->phy_np = of_parse_phandle(pdev->dev.of_node, \"phy-handle\", 0);\n+\tif (of_get_property(pdev->dev.of_node, \"cavium,sgmii-mac-1000x-mode\",\n+\t\t\t NULL))\n+\t\tpriv->mode_1000basex = true;\n+\tif (of_get_property(pdev->dev.of_node, \"cavium,sgmii-mac-phy-mode\",\n+\t\t\t NULL))\n+\t\tpriv->bgx_as_phy = true;\n+\n+\tmutex_init(&priv->lock);\n+\tpriv->node = numa_node;\n+\tpriv->bgx = (addr >> 24) & 0xf;\n+\tpriv->index = index;\n+\tif (mac)\n+\t\tpriv->mac_addr = mac;\n+\n+\tpriv->qlm = bgx_port_get_qlm(priv->node, priv->bgx, priv->index);\n+\tpriv->mode = bgx_port_get_mode(priv->node, priv->bgx, priv->index);\n+\n+\tswitch (priv->mode) {\n+\tcase PORT_MODE_SGMII:\n+\tcase PORT_MODE_RGMII:\n+\t\tpriv->get_link = bgx_port_get_sgmii_link;\n+\t\tpriv->set_link = bgx_port_set_xgmii_link;\n+\t\tbreak;\n+\tcase PORT_MODE_XAUI:\n+\tcase PORT_MODE_RXAUI:\n+\tcase PORT_MODE_XLAUI:\n+\tcase PORT_MODE_XFI:\n+\tcase PORT_MODE_10G_KR:\n+\tcase PORT_MODE_40G_KR4:\n+\t\tpriv->get_link = bgx_port_get_xaui_link;\n+\t\tpriv->set_link = bgx_port_set_xaui_link;\n+\t\tbreak;\n+\tdefault:\n+\t\tgoto err;\n+\t}\n+\n+\tdev_set_drvdata(&pdev->dev, priv);\n+\n+\tbgx_port_init(priv);\n+\n+\tdev_info(&pdev->dev, \"Probed\\n\");\n+\treturn 0;\n+ err:\n+\tkfree(priv);\n+\treturn rc;\n+}\n+\n+static int bgx_port_remove(struct platform_device *pdev)\n+{\n+\tstruct bgx_port_priv *priv = dev_get_drvdata(&pdev->dev);\n+\n+\tkfree(priv);\n+\treturn 
0;\n+}\n+\n+static void bgx_port_shutdown(struct platform_device *pdev)\n+{\n+}\n+\n+static const struct of_device_id bgx_port_match[] = {\n+\t{\n+\t\t.compatible = \"cavium,octeon-7890-bgx-port\",\n+\t},\n+\t{\n+\t\t.compatible = \"cavium,octeon-7360-xcv\",\n+\t},\n+\t{},\n+};\n+MODULE_DEVICE_TABLE(of, bgx_port_match);\n+\n+static struct platform_driver bgx_port_driver = {\n+\t.probe\t\t= bgx_port_probe,\n+\t.remove\t\t= bgx_port_remove,\n+\t.shutdown = bgx_port_shutdown,\n+\t.driver\t\t= {\n+\t\t.owner\t= THIS_MODULE,\n+\t\t.name\t= KBUILD_MODNAME,\n+\t\t.of_match_table = bgx_port_match,\n+\t},\n+};\n+\n+static int __init bgx_port_driver_init(void)\n+{\n+\tint r;\n+\tint i;\n+\tint j;\n+\tint k;\n+\n+\tfor (i = 0; i < MAX_NODES; i++) {\n+\t\tfor (j = 0; j < MAX_BGX_PER_NODE; j++) {\n+\t\t\tfor (k = 0; k < MAX_LMAC_PER_BGX; k++)\n+\t\t\t\tlmac_pknd[i][j][k] = -1;\n+\t\t}\n+\t}\n+\n+\tbgx_nexus_load();\n+\tr = platform_driver_register(&bgx_port_driver);\n+\treturn r;\n+}\n+module_init(bgx_port_driver_init);\n+\n+static void __exit bgx_port_driver_exit(void)\n+{\n+\tplatform_driver_unregister(&bgx_port_driver);\n+\tif (check_state_wq)\n+\t\tdestroy_workqueue(check_state_wq);\n+}\n+module_exit(bgx_port_driver_exit);\n+\n+MODULE_LICENSE(\"GPL\");\n+MODULE_AUTHOR(\"Cavium, Inc. <support@caviumnetworks.com>\");\n+MODULE_DESCRIPTION(\"Cavium, Inc. BGX Ethernet MAC driver.\");\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3-core.c b/drivers/net/ethernet/cavium/octeon/octeon3-core.c\nnew file mode 100644\nindex 000000000000..a07b32bc5808\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3-core.c\n@@ -0,0 +1,2075 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General Public\n+ * License. 
See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#include <linux/module.h>\n+#include <linux/wait.h>\n+#include <linux/rculist.h>\n+#include <linux/atomic.h>\n+#include <linux/kthread.h>\n+#include <linux/interrupt.h>\n+#include <linux/netdevice.h>\n+#include <linux/etherdevice.h>\n+#include <linux/platform_device.h>\n+#include <linux/ip.h>\n+#include <linux/ipv6.h>\n+#include <linux/if_vlan.h>\n+#include <linux/rio_drv.h>\n+#include <linux/rio_ids.h>\n+#include <linux/net_tstamp.h>\n+#include <linux/timecounter.h>\n+#include <linux/ptp_clock_kernel.h>\n+\n+#include <asm/octeon/octeon.h>\n+\n+#include \"octeon3.h\"\n+\n+/* First buffer:\n+ *\n+ * +---SKB---------+\n+ * | |\n+ * | |\n+ * +--+--*data |\n+ * | | |\n+ * | | |\n+ * | +---------------+\n+ * | /|\\\n+ * | |\n+ * | |\n+ * \\|/ |\n+ * WQE - 128 -+-----> +-------------+-------+ -+-\n+ * | | *skb ----+ | |\n+ * | | | |\n+ * | | | |\n+ * WQE_SKIP = 128 | | |\n+ * | | | |\n+ * | | | |\n+ * | | | |\n+ * | | | First Skip\n+ * WQE -----+-----> +---------------------+ |\n+ * | word 0 | |\n+ * | word 1 | |\n+ * | word 2 | |\n+ * | word 3 | |\n+ * | word 4 | |\n+ * +---------------------+ -+-\n+ * +----+- packet link |\n+ * | | packet data |\n+ * | | |\n+ * | | |\n+ * | | . |\n+ * | | . |\n+ * | | . |\n+ * | +---------------------+\n+ * |\n+ * |\n+ * Later buffers:|\n+ * |\n+ * |\n+ * |\n+ * |\n+ * |\n+ * | +---SKB---------+\n+ * | | |\n+ * | | |\n+ * | +--+--*data |\n+ * | | | |\n+ * | | | |\n+ * | | +---------------+\n+ * | | /|\\\n+ * | | |\n+ * | | |\n+ * | \\|/ |\n+ * WQE - 128 ----+--> +-------------+-------+ -+-\n+ * | | *skb ----+ | |\n+ * | | | |\n+ * | | | |\n+ * | | | |\n+ * | | | LATER_SKIP = 128\n+ * | | | |\n+ * | | | |\n+ * | | | |\n+ * | +---------------------+ -+-\n+ * | | packet link |\n+ * +--> | packet data |\n+ * | |\n+ * | |\n+ * | . |\n+ * | . |\n+ * | . 
|\n+ * +---------------------+\n+ */\n+\n+#define MAX_TX_QUEUE_DEPTH 512\n+#define SSO_INTSN_EXE 0x61\n+#define MAX_RX_QUEUES 32\n+\n+#define SKB_PTR_OFFSET\t\t0\n+\n+#define MAX_CORES\t\t48\n+#define FPA3_NUM_AURAS\t\t1024\n+\n+#define USE_ASYNC_IOBDMA\t1\n+#define SCR_SCRATCH\t\t0ull\n+#define SSO_NO_WAIT\t\t0ull\n+#define DID_TAG_SWTAG\t\t0x60ull\n+#define IOBDMA_SENDSINGLE\t0xffffffffffffa200ull\n+\n+/* Values for the value of wqe word2 [ERRLEV] */\n+#define PKI_ERRLEV_LA\t\t0x01\n+\n+/* Values for the value of wqe word2 [OPCODE] */\n+#define PKI_OPCODE_NONE\t\t0x00\n+#define PKI_OPCODE_JABBER\t0x02\n+#define PKI_OPCODE_FCS\t\t0x07\n+\n+/* Values for the layer type in the wqe */\n+#define PKI_LTYPE_IP4\t\t0x08\n+#define PKI_LTYPE_IP6\t\t0x0a\n+#define PKI_LTYPE_TCP\t\t0x10\n+#define PKI_LTYPE_UDP\t\t0x11\n+#define PKI_LTYPE_SCTP\t\t0x12\n+\n+/* Registers are accessed via xkphys */\n+#define SSO_BASE\t\t\t0x1670000000000ull\n+#define SSO_ADDR(node)\t\t\t(SET_XKPHYS + NODE_OFFSET(node) + \\\n+\t\t\t\t\t SSO_BASE)\n+#define GRP_OFFSET(grp)\t\t\t((grp) << 16)\n+#define GRP_ADDR(n, g)\t\t\t(SSO_ADDR(n) + GRP_OFFSET(g))\n+#define SSO_GRP_AQ_CNT(n, g)\t\t(GRP_ADDR(n, g)\t\t + 0x20000700)\n+\n+#define MIO_PTP_BASE\t\t\t0x1070000000000ull\n+#define MIO_PTP_ADDR(node)\t\t(SET_XKPHYS + NODE_OFFSET(node) + \\\n+\t\t\t\t\t MIO_PTP_BASE)\n+#define MIO_PTP_CLOCK_CFG(node)\t\t(MIO_PTP_ADDR(node)\t\t+ 0xf00)\n+#define MIO_PTP_CLOCK_HI(node)\t\t(MIO_PTP_ADDR(node)\t\t+ 0xf10)\n+#define MIO_PTP_CLOCK_COMP(node)\t(MIO_PTP_ADDR(node)\t\t+ 0xf18)\n+\n+struct octeon3_ethernet;\n+\n+struct octeon3_rx {\n+\tstruct napi_struct\tnapi;\n+\tstruct octeon3_ethernet *parent;\n+\tint rx_grp;\n+\tint rx_irq;\n+\tcpumask_t rx_affinity_hint;\n+} ____cacheline_aligned_in_smp;\n+\n+struct octeon3_ethernet {\n+\tstruct bgx_port_netdev_priv bgx_priv; /* Must be first element. 
*/\n+\tstruct list_head list;\n+\tstruct net_device *netdev;\n+\tenum octeon3_mac_type mac_type;\n+\tstruct octeon3_rx rx_cxt[MAX_RX_QUEUES];\n+\tstruct ptp_clock_info ptp_info;\n+\tstruct ptp_clock *ptp_clock;\n+\tstruct cyclecounter cc;\n+\tstruct timecounter tc;\n+\tspinlock_t ptp_lock;\t\t/* Serialize ptp clock adjustments */\n+\tint num_rx_cxt;\n+\tint pki_aura;\n+\tint pknd;\n+\tint pko_queue;\n+\tint node;\n+\tint interface;\n+\tint index;\n+\tint rx_buf_count;\n+\tint tx_complete_grp;\n+\tint rx_timestamp_hw:1;\n+\tint tx_timestamp_hw:1;\n+\tspinlock_t stat_lock;\t\t/* Protects stats counters */\n+\tu64 last_packets;\n+\tu64 last_octets;\n+\tu64 last_dropped;\n+\tatomic64_t rx_packets;\n+\tatomic64_t rx_octets;\n+\tatomic64_t rx_dropped;\n+\tatomic64_t rx_errors;\n+\tatomic64_t rx_length_errors;\n+\tatomic64_t rx_crc_errors;\n+\tatomic64_t tx_packets;\n+\tatomic64_t tx_octets;\n+\tatomic64_t tx_dropped;\n+\t/* The following two fields need to be on a different cache line as\n+\t * they are updated by pko which invalidates the cache every time it\n+\t * updates them. 
The idea is to prevent other fields from being\n+\t * invalidated unnecessarily.\n+\t */\n+\tchar cacheline_pad1[CVMX_CACHE_LINE_SIZE];\n+\tatomic64_t buffers_needed;\n+\tatomic64_t tx_backlog;\n+\tchar cacheline_pad2[CVMX_CACHE_LINE_SIZE];\n+};\n+\n+static DEFINE_MUTEX(octeon3_eth_init_mutex);\n+\n+struct octeon3_ethernet_node;\n+\n+struct octeon3_ethernet_worker {\n+\twait_queue_head_t queue;\n+\tstruct task_struct *task;\n+\tstruct octeon3_ethernet_node *oen;\n+\tatomic_t kick;\n+\tint order;\n+};\n+\n+struct octeon3_ethernet_node {\n+\tbool init_done;\n+\tint next_cpu_irq_affinity;\n+\tint node;\n+\tint pki_packet_pool;\n+\tint sso_pool;\n+\tint pko_pool;\n+\tvoid *sso_pool_stack;\n+\tvoid *pko_pool_stack;\n+\tvoid *pki_packet_pool_stack;\n+\tint sso_aura;\n+\tint pko_aura;\n+\tint tx_complete_grp;\n+\tint tx_irq;\n+\tcpumask_t tx_affinity_hint;\n+\tstruct octeon3_ethernet_worker workers[8];\n+\tstruct mutex device_list_lock;\t/* Protects the device list */\n+\tstruct list_head device_list;\n+\tspinlock_t napi_alloc_lock;\t/* Protects napi allocations */\n+};\n+\n+static int wait_pko_response;\n+module_param(wait_pko_response, int, 0644);\n+MODULE_PARM_DESC(wait_pko_response, \"Wait for response after each pko command.\");\n+\n+static int num_packet_buffers = 768;\n+module_param(num_packet_buffers, int, 0444);\n+MODULE_PARM_DESC(num_packet_buffers,\n+\t\t \"Number of packet buffers to allocate per port.\");\n+\n+static int packet_buffer_size = 2048;\n+module_param(packet_buffer_size, int, 0444);\n+MODULE_PARM_DESC(packet_buffer_size, \"Size of each RX packet buffer.\");\n+\n+static int rx_queues = 1;\n+module_param(rx_queues, int, 0444);\n+MODULE_PARM_DESC(rx_queues, \"Number of RX threads per port.\");\n+\n+int ilk0_lanes = 1;\n+module_param(ilk0_lanes, int, 0444);\n+MODULE_PARM_DESC(ilk0_lanes, \"Number of SerDes lanes used by ILK link 0.\");\n+\n+int ilk1_lanes = 1;\n+module_param(ilk1_lanes, int, 0444);\n+MODULE_PARM_DESC(ilk1_lanes, \"Number of SerDes 
lanes used by ILK link 1.\");\n+\n+static struct octeon3_ethernet_node octeon3_eth_node[MAX_NODES];\n+static struct kmem_cache *octeon3_eth_sso_pko_cache;\n+\n+/**\n+ * Reads a 64 bit value from the processor local scratchpad memory.\n+ *\n+ * @param offset byte offset into scratch pad to read\n+ *\n+ * @return value read\n+ */\n+static inline u64 scratch_read64(u64 offset)\n+{\n+\treturn *(u64 *)((long)SCRATCH_BASE + offset);\n+}\n+\n+/**\n+ * Write a 64 bit value to the processor local scratchpad memory.\n+ *\n+ * @param offset byte offset into scratch pad to write\n+ @ @praram value to write\n+ */\n+static inline void scratch_write64(u64 offset, u64 value)\n+{\n+\t*(u64 *)((long)SCRATCH_BASE + offset) = value;\n+}\n+\n+static int get_pki_chan(int node, int interface, int index)\n+{\n+\tint\tpki_chan;\n+\n+\tpki_chan = node << 12;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CNF75XX) &&\n+\t (interface == 1 || interface == 2)) {\n+\t\t/* SRIO */\n+\t\tpki_chan |= 0x240 + (2 * (interface - 1)) + index;\n+\t} else {\n+\t\t/* BGX */\n+\t\tpki_chan |= 0x800 + (0x100 * interface) + (0x10 * index);\n+\t}\n+\n+\treturn pki_chan;\n+}\n+\n+/* Map auras to the field priv->buffers_needed. 
Used to speed up packet\n+ * transmission.\n+ */\n+static void *aura2bufs_needed[MAX_NODES][FPA3_NUM_AURAS];\n+\n+static int octeon3_eth_lgrp_to_ggrp(int node, int grp)\n+{\n+\treturn (node << 8) | grp;\n+}\n+\n+static void octeon3_eth_gen_affinity(int node, cpumask_t *mask)\n+{\n+\tint cpu;\n+\n+\tdo {\n+\t\tcpu = cpumask_next(octeon3_eth_node[node].next_cpu_irq_affinity, cpu_online_mask);\n+\t\tocteon3_eth_node[node].next_cpu_irq_affinity++;\n+\t\tif (cpu >= nr_cpu_ids) {\n+\t\t\tocteon3_eth_node[node].next_cpu_irq_affinity = -1;\n+\t\t\tcontinue;\n+\t\t}\n+\t} while (false);\n+\tcpumask_clear(mask);\n+\tcpumask_set_cpu(cpu, mask);\n+}\n+\n+struct wr_ret {\n+\tvoid *work;\n+\tu16 grp;\n+};\n+\n+static inline struct wr_ret octeon3_core_get_work_sync(int grp)\n+{\n+\tu64\t\tnode = cvmx_get_node_num();\n+\tu64\t\taddr;\n+\tu64\t\tresponse;\n+\tstruct wr_ret\tr;\n+\n+\t/* See SSO_GET_WORK_LD_S for the address to read */\n+\taddr = 1ull << 63;\n+\taddr |= BIT(48);\n+\taddr |= DID_TAG_SWTAG << 40;\n+\taddr |= node << 36;\n+\taddr |= BIT(30);\n+\taddr |= BIT(29);\n+\taddr |= octeon3_eth_lgrp_to_ggrp(node, grp) << 4;\n+\taddr |= SSO_NO_WAIT << 3;\n+\tresponse = __raw_readq((void __iomem *)addr);\n+\n+\t/* See SSO_GET_WORK_RTN_S for the format of the response */\n+\tr.grp = (response & GENMASK_ULL(57, 48)) >> 48;\n+\tif (response & BIT(63))\n+\t\tr.work = NULL;\n+\telse\n+\t\tr.work = phys_to_virt(response & GENMASK_ULL(41, 0));\n+\n+\treturn r;\n+}\n+\n+/**\n+ * octeon3_core_get_work_async - Request work via a iobdma command. 
Doesn't wait\n+ *\t\t\t\t for the response.\n+ *\n+ * @grp: Group to request work for.\n+ */\n+static inline void octeon3_core_get_work_async(unsigned int grp)\n+{\n+\tu64\tdata;\n+\tu64\tnode = cvmx_get_node_num();\n+\n+\t/* See SSO_GET_WORK_DMA_S for the command structure */\n+\tdata = SCR_SCRATCH << 56;\n+\tdata |= 1ull << 48;\n+\tdata |= DID_TAG_SWTAG << 40;\n+\tdata |= node << 36;\n+\tdata |= 1ull << 30;\n+\tdata |= 1ull << 29;\n+\tdata |= octeon3_eth_lgrp_to_ggrp(node, grp) << 4;\n+\tdata |= SSO_NO_WAIT << 3;\n+\n+\t__raw_writeq(data, (void __iomem *)IOBDMA_SENDSINGLE);\n+}\n+\n+/**\n+ * octeon3_core_get_response_async - Read the request work response. Must be\n+ *\t\t\t\t called after calling\n+ *\t\t\t\t octeon3_core_get_work_async().\n+ *\n+ * Returns work queue entry.\n+ */\n+static inline struct wr_ret octeon3_core_get_response_async(void)\n+{\n+\tstruct wr_ret\tr;\n+\tu64\t\tresponse;\n+\n+\tCVMX_SYNCIOBDMA;\n+\tresponse = scratch_read64(SCR_SCRATCH);\n+\n+\t/* See SSO_GET_WORK_RTN_S for the format of the response */\n+\tr.grp = (response & GENMASK_ULL(57, 48)) >> 48;\n+\tif (response & BIT(63))\n+\t\tr.work = NULL;\n+\telse\n+\t\tr.work = phys_to_virt(response & GENMASK_ULL(41, 0));\n+\n+\treturn r;\n+}\n+\n+static void octeon3_eth_replenish_rx(struct octeon3_ethernet *priv, int count)\n+{\n+\tstruct sk_buff *skb;\n+\tint i;\n+\n+\tfor (i = 0; i < count; i++) {\n+\t\tvoid **buf;\n+\n+\t\tskb = __alloc_skb(packet_buffer_size, GFP_ATOMIC, 0, priv->node);\n+\t\tif (!skb)\n+\t\t\tbreak;\n+\t\tbuf = (void **)PTR_ALIGN(skb->head, 128);\n+\t\tbuf[SKB_PTR_OFFSET] = skb;\n+\t\tocteon_fpa3_free(priv->node, priv->pki_aura, buf);\n+\t}\n+}\n+\n+static bool octeon3_eth_tx_complete_runnable(struct octeon3_ethernet_worker *worker)\n+{\n+\treturn atomic_read(&worker->kick) != 0 || kthread_should_stop();\n+}\n+\n+static int octeon3_eth_replenish_all(struct octeon3_ethernet_node *oen)\n+{\n+\tint pending = 0;\n+\tint batch_size = 32;\n+\tstruct octeon3_ethernet 
*priv;\n+\n+\trcu_read_lock();\n+\tlist_for_each_entry_rcu(priv, &oen->device_list, list) {\n+\t\tint amount = atomic64_sub_if_positive(batch_size, &priv->buffers_needed);\n+\n+\t\tif (amount >= 0) {\n+\t\t\tocteon3_eth_replenish_rx(priv, batch_size);\n+\t\t\tpending += amount;\n+\t\t}\n+\t}\n+\trcu_read_unlock();\n+\treturn pending;\n+}\n+\n+static int octeon3_eth_tx_complete_hwtstamp(struct octeon3_ethernet *priv,\n+\t\t\t\t\t struct sk_buff *skb)\n+{\n+\tstruct skb_shared_hwtstamps\tshts;\n+\tu64\t\t\t\thwts;\n+\tu64\t\t\t\tns;\n+\n+\thwts = *((u64 *)(skb->cb) + 1);\n+\tns = timecounter_cyc2time(&priv->tc, hwts);\n+\tmemset(&shts, 0, sizeof(shts));\n+\tshts.hwtstamp = ns_to_ktime(ns);\n+\tskb_tstamp_tx(skb, &shts);\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_eth_tx_complete_worker(void *data)\n+{\n+\tstruct octeon3_ethernet_worker *worker = data;\n+\tstruct octeon3_ethernet_node *oen = worker->oen;\n+\tint backlog;\n+\tint order = worker->order;\n+\tint tx_complete_stop_thresh = order * 100;\n+\tint backlog_stop_thresh = order == 0 ? 
31 : order * 80;\n+\tu64 aq_cnt;\n+\tint i;\n+\n+\twhile (!kthread_should_stop()) {\n+\t\twait_event_interruptible(worker->queue, octeon3_eth_tx_complete_runnable(worker));\n+\t\tatomic_dec_if_positive(&worker->kick); /* clear the flag */\n+\n+\t\tdo {\n+\t\t\tbacklog = octeon3_eth_replenish_all(oen);\n+\t\t\tfor (i = 0; i < 100; i++) {\n+\t\t\t\tvoid **work;\n+\t\t\t\tstruct net_device *tx_netdev;\n+\t\t\t\tstruct octeon3_ethernet *tx_priv;\n+\t\t\t\tstruct sk_buff *skb;\n+\t\t\t\tstruct wr_ret r;\n+\n+\t\t\t\tr = octeon3_core_get_work_sync(oen->tx_complete_grp);\n+\t\t\t\twork = r.work;\n+\t\t\t\tif (!work)\n+\t\t\t\t\tbreak;\n+\t\t\t\ttx_netdev = work[0];\n+\t\t\t\ttx_priv = netdev_priv(tx_netdev);\n+\t\t\t\tif (unlikely(netif_queue_stopped(tx_netdev)) &&\n+\t\t\t\t atomic64_read(&tx_priv->tx_backlog) < MAX_TX_QUEUE_DEPTH)\n+\t\t\t\t\tnetif_wake_queue(tx_netdev);\n+\t\t\t\tskb = container_of((void *)work, struct sk_buff, cb);\n+\t\t\t\tif (unlikely(tx_priv->tx_timestamp_hw) &&\n+\t\t\t\t unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))\n+\t\t\t\t\tocteon3_eth_tx_complete_hwtstamp(tx_priv, skb);\n+\t\t\t\tdev_kfree_skb(skb);\n+\t\t\t}\n+\n+\t\t\taq_cnt = oct_csr_read(SSO_GRP_AQ_CNT(oen->node, oen->tx_complete_grp));\n+\t\t\taq_cnt &= GENMASK_ULL(32, 0);\n+\t\t\tif ((backlog > backlog_stop_thresh || aq_cnt > tx_complete_stop_thresh) &&\n+\t\t\t order < ARRAY_SIZE(oen->workers) - 1) {\n+\t\t\t\tatomic_set(&oen->workers[order + 1].kick, 1);\n+\t\t\t\twake_up(&oen->workers[order + 1].queue);\n+\t\t\t}\n+\t\t} while (!need_resched() &&\n+\t\t\t (backlog > backlog_stop_thresh ||\n+\t\t\t aq_cnt > tx_complete_stop_thresh));\n+\n+\t\tcond_resched();\n+\n+\t\tif (!octeon3_eth_tx_complete_runnable(worker))\n+\t\t\tocteon3_sso_irq_set(oen->node, oen->tx_complete_grp, true);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static irqreturn_t octeon3_eth_tx_handler(int irq, void *info)\n+{\n+\tstruct octeon3_ethernet_node *oen = info;\n+\t/* Disarm the irq. 
*/\n+\tocteon3_sso_irq_set(oen->node, oen->tx_complete_grp, false);\n+\tatomic_set(&oen->workers[0].kick, 1);\n+\twake_up(&oen->workers[0].queue);\n+\treturn IRQ_HANDLED;\n+}\n+\n+static int octeon3_eth_global_init(unsigned int node,\n+\t\t\t\t struct platform_device *pdev)\n+{\n+\tint i;\n+\tint rv = 0;\n+\tunsigned int sso_intsn;\n+\tstruct octeon3_ethernet_node *oen;\n+\n+\tmutex_lock(&octeon3_eth_init_mutex);\n+\n+\toen = octeon3_eth_node + node;\n+\n+\tif (oen->init_done)\n+\t\tgoto done;\n+\n+\t/* CN78XX-P1.0 cannot un-initialize PKO, so get a module\n+\t * reference to prevent it from being unloaded.\n+\t */\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))\n+\t\tif (!try_module_get(THIS_MODULE))\n+\t\t\tdev_err(&pdev->dev,\n+\t\t\t\t\"ERROR: Could not obtain module reference for CN78XX-P1.0\\n\");\n+\n+\tINIT_LIST_HEAD(&oen->device_list);\n+\tmutex_init(&oen->device_list_lock);\n+\tspin_lock_init(&oen->napi_alloc_lock);\n+\n+\toen->node = node;\n+\n+\tocteon_fpa3_init(node);\n+\trv = octeon_fpa3_pool_init(node, -1, &oen->sso_pool,\n+\t\t\t\t &oen->sso_pool_stack, 40960);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\trv = octeon_fpa3_pool_init(node, -1, &oen->pko_pool,\n+\t\t\t\t &oen->pko_pool_stack, 40960);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\trv = octeon_fpa3_pool_init(node, -1, &oen->pki_packet_pool,\n+\t\t\t\t &oen->pki_packet_pool_stack, 64 * num_packet_buffers);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\trv = octeon_fpa3_aura_init(node, oen->sso_pool, -1,\n+\t\t\t\t &oen->sso_aura, num_packet_buffers, 20480);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\trv = octeon_fpa3_aura_init(node, oen->pko_pool, -1,\n+\t\t\t\t &oen->pko_aura, num_packet_buffers, 20480);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\tdev_info(&pdev->dev, \"SSO:%d:%d, PKO:%d:%d\\n\", oen->sso_pool,\n+\t\t oen->sso_aura, oen->pko_pool, oen->pko_aura);\n+\n+\tif (!octeon3_eth_sso_pko_cache) {\n+\t\tocteon3_eth_sso_pko_cache = kmem_cache_create(\"sso_pko\", 4096, 128, 0, NULL);\n+\t\tif (!octeon3_eth_sso_pko_cache) 
{\n+\t\t\trv = -ENOMEM;\n+\t\t\tgoto done;\n+\t\t}\n+\t}\n+\n+\trv = octeon_fpa3_mem_fill(node, octeon3_eth_sso_pko_cache,\n+\t\t\t\t oen->sso_aura, 1024);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\trv = octeon_fpa3_mem_fill(node, octeon3_eth_sso_pko_cache,\n+\t\t\t\t oen->pko_aura, 1024);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\trv = octeon3_sso_init(node, oen->sso_aura);\n+\tif (rv)\n+\t\tgoto done;\n+\n+\toen->tx_complete_grp = octeon3_sso_alloc_grp(node, -1);\n+\tif (oen->tx_complete_grp < 0)\n+\t\tgoto done;\n+\n+\tsso_intsn = SSO_INTSN_EXE << 12 | oen->tx_complete_grp;\n+\toen->tx_irq = irq_create_mapping(NULL, sso_intsn);\n+\tif (!oen->tx_irq) {\n+\t\trv = -ENODEV;\n+\t\tgoto done;\n+\t}\n+\n+\trv = octeon3_pko_init_global(node, oen->pko_aura);\n+\tif (rv) {\n+\t\trv = -ENODEV;\n+\t\tgoto done;\n+\t}\n+\n+\tocteon3_pki_vlan_init(node);\n+\tocteon3_pki_cluster_init(node, pdev);\n+\tocteon3_pki_ltype_init(node);\n+\tocteon3_pki_enable(node);\n+\n+\tfor (i = 0; i < ARRAY_SIZE(oen->workers); i++) {\n+\t\toen->workers[i].oen = oen;\n+\t\tinit_waitqueue_head(&oen->workers[i].queue);\n+\t\toen->workers[i].order = i;\n+\t}\n+\tfor (i = 0; i < ARRAY_SIZE(oen->workers); i++) {\n+\t\toen->workers[i].task = kthread_create_on_node(octeon3_eth_tx_complete_worker,\n+\t\t\t\t\t\t\t oen->workers + i, node,\n+\t\t\t\t\t\t\t \"oct3_eth/%d:%d\", node, i);\n+\t\tif (IS_ERR(oen->workers[i].task)) {\n+\t\t\trv = PTR_ERR(oen->workers[i].task);\n+\t\t\tgoto done;\n+\t\t} else {\n+#ifdef CONFIG_NUMA\n+\t\t\tset_cpus_allowed_ptr(oen->workers[i].task, cpumask_of_node(node));\n+#endif\n+\t\t\twake_up_process(oen->workers[i].task);\n+\t\t}\n+\t}\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))\n+\t\tocteon3_sso_pass1_limit(node, oen->tx_complete_grp);\n+\n+\trv = request_irq(oen->tx_irq, octeon3_eth_tx_handler,\n+\t\t\t IRQ_TYPE_EDGE_RISING, \"oct3_eth_tx_done\", oen);\n+\tif (rv)\n+\t\tgoto done;\n+\tocteon3_eth_gen_affinity(node, &oen->tx_affinity_hint);\n+\tirq_set_affinity_hint(oen->tx_irq, 
&oen->tx_affinity_hint);\n+\n+\tocteon3_sso_irq_set(node, oen->tx_complete_grp, true);\n+\n+\toen->init_done = true;\n+done:\n+\tmutex_unlock(&octeon3_eth_init_mutex);\n+\treturn rv;\n+}\n+\n+static struct sk_buff *octeon3_eth_work_to_skb(void *w)\n+{\n+\tstruct sk_buff *skb;\n+\tvoid **f = w;\n+\n+\tskb = f[-16];\n+\treturn skb;\n+}\n+\n+/* Receive one packet.\n+ * returns the number of RX buffers consumed.\n+ */\n+static int octeon3_eth_rx_one(struct octeon3_rx *rx, bool is_async, bool req_next)\n+{\n+\tint segments;\n+\tint ret;\n+\tunsigned int packet_len;\n+\tstruct wqe *work;\n+\tu8 *data;\n+\tint len_remaining;\n+\tstruct sk_buff *skb;\n+\tunion buf_ptr packet_ptr;\n+\tstruct wr_ret r;\n+\tstruct octeon3_ethernet *priv = rx->parent;\n+\n+\tif (is_async)\n+\t\tr = octeon3_core_get_response_async();\n+\telse\n+\t\tr = octeon3_core_get_work_sync(rx->rx_grp);\n+\twork = r.work;\n+\tif (!work)\n+\t\treturn 0;\n+\n+\t/* Request the next work so it'll be ready when we need it */\n+\tif (is_async && req_next)\n+\t\tocteon3_core_get_work_async(rx->rx_grp);\n+\n+\tskb = octeon3_eth_work_to_skb(work);\n+\n+\tsegments = work->word0.bufs;\n+\tret = segments;\n+\tpacket_ptr = work->packet_ptr;\n+\tif (unlikely(work->word2.err_level <= PKI_ERRLEV_LA &&\n+\t\t work->word2.err_code != PKI_OPCODE_NONE)) {\n+\t\tatomic64_inc(&priv->rx_errors);\n+\t\tswitch (work->word2.err_code) {\n+\t\tcase PKI_OPCODE_JABBER:\n+\t\t\tatomic64_inc(&priv->rx_length_errors);\n+\t\t\tbreak;\n+\t\tcase PKI_OPCODE_FCS:\n+\t\t\tatomic64_inc(&priv->rx_crc_errors);\n+\t\t\tbreak;\n+\t\t}\n+\t\tdata = phys_to_virt(packet_ptr.addr);\n+\t\tfor (;;) {\n+\t\t\tdev_kfree_skb_any(skb);\n+\t\t\tsegments--;\n+\t\t\tif (segments <= 0)\n+\t\t\t\tbreak;\n+\t\t\tpacket_ptr.u64 = *(u64 *)(data - 8);\n+#ifndef __LITTLE_ENDIAN\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\t\t\t/* PKI_BUFLINK_S's are endian-swapped */\n+\t\t\t\tpacket_ptr.u64 = swab64(packet_ptr.u64);\n+\t\t\t}\n+#endif\n+\t\t\tdata = 
phys_to_virt(packet_ptr.addr);\n+\t\t\tskb = octeon3_eth_work_to_skb((void *)round_down((unsigned long)data, 128ull));\n+\t\t}\n+\t\tgoto out;\n+\t}\n+\n+\tpacket_len = work->word1.len;\n+\tdata = phys_to_virt(packet_ptr.addr);\n+\tskb->data = data;\n+\tskb->len = packet_len;\n+\tlen_remaining = packet_len;\n+\tif (segments == 1) {\n+\t\t/* Strip the ethernet fcs */\n+\t\tskb->len -= 4;\n+\t\tskb_set_tail_pointer(skb, skb->len);\n+\t} else {\n+\t\tbool first_frag = true;\n+\t\tstruct sk_buff *current_skb = skb;\n+\t\tstruct sk_buff *next_skb = NULL;\n+\t\tunsigned int segment_size;\n+\n+\t\tskb_frag_list_init(skb);\n+\t\tfor (;;) {\n+\t\t\tsegment_size = (segments == 1) ? len_remaining : packet_ptr.size;\n+\t\t\tlen_remaining -= segment_size;\n+\t\t\tif (!first_frag) {\n+\t\t\t\tcurrent_skb->len = segment_size;\n+\t\t\t\tskb->data_len += segment_size;\n+\t\t\t\tskb->truesize += current_skb->truesize;\n+\t\t\t}\n+\t\t\tskb_set_tail_pointer(current_skb, segment_size);\n+\t\t\tsegments--;\n+\t\t\tif (segments == 0)\n+\t\t\t\tbreak;\n+\t\t\tpacket_ptr.u64 = *(u64 *)(data - 8);\n+#ifndef __LITTLE_ENDIAN\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\t\t\t/* PKI_BUFLINK_S's are endian-swapped */\n+\t\t\t\tpacket_ptr.u64 = swab64(packet_ptr.u64);\n+\t\t\t}\n+#endif\n+\t\t\tdata = phys_to_virt(packet_ptr.addr);\n+\t\t\tnext_skb = octeon3_eth_work_to_skb((void *)round_down((unsigned long)data, 128ull));\n+\t\t\tif (first_frag) {\n+\t\t\t\tnext_skb->next = skb_shinfo(current_skb)->frag_list;\n+\t\t\t\tskb_shinfo(current_skb)->frag_list = next_skb;\n+\t\t\t} else {\n+\t\t\t\tcurrent_skb->next = next_skb;\n+\t\t\t\tnext_skb->next = NULL;\n+\t\t\t}\n+\t\t\tcurrent_skb = next_skb;\n+\t\t\tfirst_frag = false;\n+\t\t\tcurrent_skb->data = data;\n+\t\t}\n+\n+\t\t/* Strip the ethernet fcs */\n+\t\tpskb_trim(skb, skb->len - 4);\n+\t}\n+\n+\tif (likely(priv->netdev->flags & IFF_UP)) {\n+\t\tskb_checksum_none_assert(skb);\n+\t\tif (unlikely(priv->rx_timestamp_hw)) 
{\n+\t\t\t/* The first 8 bytes are the timestamp */\n+\t\t\tu64 hwts = *(u64 *)skb->data;\n+\t\t\tu64 ns;\n+\t\t\tstruct skb_shared_hwtstamps *shts;\n+\n+\t\t\tns = timecounter_cyc2time(&priv->tc, hwts);\n+\t\t\tshts = skb_hwtstamps(skb);\n+\t\t\tmemset(shts, 0, sizeof(*shts));\n+\t\t\tshts->hwtstamp = ns_to_ktime(ns);\n+\t\t\t__skb_pull(skb, 8);\n+\t\t}\n+\n+\t\tskb->protocol = eth_type_trans(skb, priv->netdev);\n+\t\tskb->dev = priv->netdev;\n+\t\tif (priv->netdev->features & NETIF_F_RXCSUM) {\n+\t\t\tif ((work->word2.lc_hdr_type == PKI_LTYPE_IP4 ||\n+\t\t\t work->word2.lc_hdr_type == PKI_LTYPE_IP6) &&\n+\t\t\t (work->word2.lf_hdr_type == PKI_LTYPE_TCP ||\n+\t\t\t work->word2.lf_hdr_type == PKI_LTYPE_UDP ||\n+\t\t\t work->word2.lf_hdr_type == PKI_LTYPE_SCTP))\n+\t\t\t\tif (work->word2.err_code == 0)\n+\t\t\t\t\tskb->ip_summed = CHECKSUM_UNNECESSARY;\n+\t\t}\n+\n+\t\tnapi_gro_receive(&rx->napi, skb);\n+\t} else {\n+\t\t/* Drop any packet received for a device that isn't up */\n+\t\tatomic64_inc(&priv->rx_dropped);\n+\t\tdev_kfree_skb_any(skb);\n+\t}\n+out:\n+\treturn ret;\n+}\n+\n+static int octeon3_eth_napi(struct napi_struct *napi, int budget)\n+{\n+\tint rx_count = 0;\n+\tstruct octeon3_rx *cxt;\n+\tstruct octeon3_ethernet *priv;\n+\tu64 aq_cnt;\n+\tint n = 0;\n+\tint n_bufs = 0;\n+\tu64 old_scratch;\n+\n+\tcxt = container_of(napi, struct octeon3_rx, napi);\n+\tpriv = cxt->parent;\n+\n+\t/* Get the amount of work pending */\n+\taq_cnt = oct_csr_read(SSO_GRP_AQ_CNT(priv->node, cxt->rx_grp));\n+\taq_cnt &= GENMASK_ULL(32, 0);\n+\n+\tif (likely(USE_ASYNC_IOBDMA)) {\n+\t\t/* Save scratch in case userspace is using it */\n+\t\tCVMX_SYNCIOBDMA;\n+\t\told_scratch = scratch_read64(SCR_SCRATCH);\n+\n+\t\tocteon3_core_get_work_async(cxt->rx_grp);\n+\t}\n+\n+\twhile (rx_count < budget) {\n+\t\tn = 0;\n+\n+\t\tif (likely(USE_ASYNC_IOBDMA)) {\n+\t\t\tbool req_next = rx_count < (budget - 1) ? 
true : false;\n+\n+\t\t\tn = octeon3_eth_rx_one(cxt, true, req_next);\n+\t\t} else {\n+\t\t\tn = octeon3_eth_rx_one(cxt, false, false);\n+\t\t}\n+\n+\t\tif (n == 0)\n+\t\t\tbreak;\n+\n+\t\tn_bufs += n;\n+\t\trx_count++;\n+\t}\n+\n+\t/* Wake up worker threads */\n+\tn_bufs = atomic64_add_return(n_bufs, &priv->buffers_needed);\n+\tif (n_bufs >= 32) {\n+\t\tstruct octeon3_ethernet_node *oen;\n+\n+\t\toen = octeon3_eth_node + priv->node;\n+\t\tatomic_set(&oen->workers[0].kick, 1);\n+\t\twake_up(&oen->workers[0].queue);\n+\t}\n+\n+\t/* Stop the thread when no work is pending */\n+\tif (rx_count < budget) {\n+\t\tnapi_complete(napi);\n+\t\tocteon3_sso_irq_set(cxt->parent->node, cxt->rx_grp, true);\n+\t}\n+\n+\tif (likely(USE_ASYNC_IOBDMA)) {\n+\t\t/* Restore the scratch area */\n+\t\tscratch_write64(SCR_SCRATCH, old_scratch);\n+\t}\n+\n+\treturn rx_count;\n+}\n+\n+#undef BROKEN_SIMULATOR_CSUM\n+\n+static void ethtool_get_drvinfo(struct net_device *netdev,\n+\t\t\t\tstruct ethtool_drvinfo *info)\n+{\n+\tstrcpy(info->driver, \"octeon3-ethernet\");\n+\tstrcpy(info->version, \"1.0\");\n+\tstrcpy(info->bus_info, \"Builtin\");\n+}\n+\n+static int ethtool_get_ts_info(struct net_device *ndev,\n+\t\t\t struct ethtool_ts_info *info)\n+{\n+\tstruct octeon3_ethernet *priv = netdev_priv(ndev);\n+\n+\tinfo->so_timestamping =\n+\t\tSOF_TIMESTAMPING_TX_HARDWARE |\n+\t\tSOF_TIMESTAMPING_RX_HARDWARE |\n+\t\tSOF_TIMESTAMPING_RAW_HARDWARE;\n+\n+\tif (priv->ptp_clock)\n+\t\tinfo->phc_index = ptp_clock_index(priv->ptp_clock);\n+\telse\n+\t\tinfo->phc_index = -1;\n+\n+\tinfo->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);\n+\n+\tinfo->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL);\n+\n+\treturn 0;\n+}\n+\n+static const struct ethtool_ops octeon3_ethtool_ops = {\n+\t.get_drvinfo = ethtool_get_drvinfo,\n+\t.get_link_ksettings = bgx_port_ethtool_get_link_ksettings,\n+\t.set_settings = bgx_port_ethtool_set_settings,\n+\t.nway_reset = 
bgx_port_ethtool_nway_reset,\n+\t.get_link = ethtool_op_get_link,\n+\t.get_ts_info = ethtool_get_ts_info,\n+};\n+\n+static int octeon3_eth_ndo_change_mtu(struct net_device *netdev, int new_mtu)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\tint fifo_size;\n+\t\tint max_mtu = 1500;\n+\t\tstruct octeon3_ethernet *priv = netdev_priv(netdev);\n+\n+\t\t/* On 78XX-Pass1 the mtu must be limited. The PKO may\n+\t\t * to lock up when calculating the L4 checksum for\n+\t\t * large packets. How large the packets can be depends\n+\t\t * on the amount of pko fifo assigned to the port.\n+\t\t *\n+\t\t * FIFO size Max frame size\n+\t\t *\t2.5 KB\t\t\t\t1920\n+\t\t *\t5.0 KB\t\t\t\t4480\n+\t\t * 10.0 KB\t\t\t\t9600\n+\t\t *\n+\t\t * The maximum mtu is set to the largest frame size minus the\n+\t\t * l2 header.\n+\t\t */\n+\t\tfifo_size = octeon3_pko_get_fifo_size(priv->node, priv->interface,\n+\t\t\t\t\t\t priv->index, priv->mac_type);\n+\n+\t\tswitch (fifo_size) {\n+\t\tcase 2560:\n+\t\t\tmax_mtu = 1920 - ETH_HLEN - ETH_FCS_LEN - (2 * VLAN_HLEN);\n+\t\t\tbreak;\n+\n+\t\tcase 5120:\n+\t\t\tmax_mtu = 4480 - ETH_HLEN - ETH_FCS_LEN - (2 * VLAN_HLEN);\n+\t\t\tbreak;\n+\n+\t\tcase 10240:\n+\t\t\tmax_mtu = 9600 - ETH_HLEN - ETH_FCS_LEN - (2 * VLAN_HLEN);\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (new_mtu > max_mtu) {\n+\t\t\tnetdev_warn(netdev,\n+\t\t\t\t \"Maximum MTU supported is %d\", max_mtu);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\treturn bgx_port_change_mtu(netdev, new_mtu);\n+}\n+\n+static int octeon3_eth_common_ndo_init(struct net_device *netdev, int extra_skip)\n+{\n+\tstruct octeon3_ethernet *priv = netdev_priv(netdev);\n+\tstruct octeon3_ethernet_node *oen = octeon3_eth_node + priv->node;\n+\tint pki_chan, dq;\n+\tint base_rx_grp[MAX_RX_QUEUES];\n+\tint r, i;\n+\tint aura;\n+\n+\tnetif_carrier_off(netdev);\n+\n+\tnetdev->features |=\n+#ifndef BROKEN_SIMULATOR_CSUM\n+\t\tNETIF_F_IP_CSUM |\n+\t\tNETIF_F_IPV6_CSUM |\n+#endif\n+\t\tNETIF_F_SG 
|\n+\t\tNETIF_F_FRAGLIST |\n+\t\tNETIF_F_RXCSUM |\n+\t\tNETIF_F_LLTX;\n+\n+\tif (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))\n+\t\tnetdev->features |= NETIF_F_SCTP_CRC;\n+\n+\tnetdev->features |= NETIF_F_TSO | NETIF_F_TSO6;\n+\n+\t/* Set user changeable settings */\n+\tnetdev->hw_features = netdev->features;\n+\n+\tpriv->rx_buf_count = num_packet_buffers;\n+\n+\tpki_chan = get_pki_chan(priv->node, priv->interface, priv->index);\n+\n+\tdq = octeon3_pko_interface_init(priv->node, priv->interface,\n+\t\t\t\t\tpriv->index, priv->mac_type, pki_chan);\n+\tif (dq < 0) {\n+\t\tdev_err(netdev->dev.parent, \"Failed to initialize pko\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tr = octeon3_pko_activate_dq(priv->node, dq, 1);\n+\tif (r < 0) {\n+\t\tdev_err(netdev->dev.parent, \"Failed to activate dq\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tpriv->pko_queue = dq;\n+\tocteon_fpa3_aura_init(priv->node, oen->pki_packet_pool, -1, &aura,\n+\t\t\t num_packet_buffers, num_packet_buffers * 2);\n+\tpriv->pki_aura = aura;\n+\taura2bufs_needed[priv->node][priv->pki_aura] = &priv->buffers_needed;\n+\n+\tr = octeon3_sso_alloc_grp_range(priv->node, -1, rx_queues, false, base_rx_grp);\n+\tif (r) {\n+\t\tdev_err(netdev->dev.parent, \"Failed to allocated SSO group\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\tfor (i = 0; i < rx_queues; i++) {\n+\t\tpriv->rx_cxt[i].rx_grp = base_rx_grp[i];\n+\t\tpriv->rx_cxt[i].parent = priv;\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))\n+\t\t\tocteon3_sso_pass1_limit(priv->node, priv->rx_cxt[i].rx_grp);\n+\t}\n+\tpriv->num_rx_cxt = rx_queues;\n+\n+\tpriv->tx_complete_grp = oen->tx_complete_grp;\n+\tdev_info(netdev->dev.parent,\n+\t\t \"rx sso grp:%d..%d aura:%d pknd:%d pko_queue:%d\\n\",\n+\t\t *base_rx_grp, *(base_rx_grp + priv->num_rx_cxt - 1),\n+\t\t priv->pki_aura, priv->pknd, priv->pko_queue);\n+\n+\tocteon3_pki_port_init(priv->node, priv->pki_aura, *base_rx_grp,\n+\t\t\t extra_skip, (packet_buffer_size - 128),\n+\t\t\t priv->pknd, 
priv->num_rx_cxt);\n+\n+\tpriv->last_packets = 0;\n+\tpriv->last_octets = 0;\n+\tpriv->last_dropped = 0;\n+\n+\t/* Register ethtool methods */\n+\tnetdev->ethtool_ops = &octeon3_ethtool_ops;\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_eth_bgx_ndo_init(struct net_device *netdev)\n+{\n+\tstruct octeon3_ethernet\t*priv = netdev_priv(netdev);\n+\tconst u8\t\t*mac;\n+\tint\t\t\tr;\n+\n+\tpriv->pknd = bgx_port_get_pknd(priv->node, priv->interface, priv->index);\n+\tocteon3_eth_common_ndo_init(netdev, 0);\n+\n+\t/* Padding and FCS are done in BGX */\n+\tr = octeon3_pko_set_mac_options(priv->node, priv->interface, priv->index,\n+\t\t\t\t\tpriv->mac_type, false, false, 0);\n+\tif (r)\n+\t\treturn r;\n+\n+\tmac = bgx_port_get_mac(netdev);\n+\tif (mac && is_valid_ether_addr(mac)) {\n+\t\tmemcpy(netdev->dev_addr, mac, ETH_ALEN);\n+\t\tnetdev->addr_assign_type &= ~NET_ADDR_RANDOM;\n+\t} else {\n+\t\teth_hw_addr_random(netdev);\n+\t}\n+\n+\tbgx_port_set_rx_filtering(netdev);\n+\tocteon3_eth_ndo_change_mtu(netdev, netdev->mtu);\n+\n+\treturn 0;\n+}\n+\n+static void octeon3_eth_ndo_uninit(struct net_device *netdev)\n+{\n+\tstruct octeon3_ethernet\t*priv = netdev_priv(netdev);\n+\tint\t\t\tgrp[MAX_RX_QUEUES];\n+\tint\t\t\ti;\n+\n+\t/* Shutdwon pki for this interface */\n+\tocteon3_pki_port_shutdown(priv->node, priv->pknd);\n+\tocteon_fpa3_release_aura(priv->node, priv->pki_aura);\n+\taura2bufs_needed[priv->node][priv->pki_aura] = NULL;\n+\n+\t/* Shutdown pko for this interface */\n+\tocteon3_pko_interface_uninit(priv->node, &priv->pko_queue, 1);\n+\n+\t/* Free the receive contexts sso groups */\n+\tfor (i = 0; i < rx_queues; i++)\n+\t\tgrp[i] = priv->rx_cxt[i].rx_grp;\n+\tocteon3_sso_free_grp_range(priv->node, grp, rx_queues);\n+}\n+\n+static irqreturn_t octeon3_eth_rx_handler(int irq, void *info)\n+{\n+\tstruct octeon3_rx *rx = info;\n+\n+\t/* Disarm the irq. 
*/\n+\tocteon3_sso_irq_set(rx->parent->node, rx->rx_grp, false);\n+\n+\tnapi_schedule(&rx->napi);\n+\treturn IRQ_HANDLED;\n+}\n+\n+static int octeon3_eth_common_ndo_open(struct net_device *netdev)\n+{\n+\tstruct octeon3_ethernet *priv = netdev_priv(netdev);\n+\tstruct octeon3_rx *rx;\n+\tint i;\n+\tint r;\n+\n+\tfor (i = 0; i < priv->num_rx_cxt; i++) {\n+\t\tunsigned int\tsso_intsn;\n+\n+\t\trx = priv->rx_cxt + i;\n+\t\tsso_intsn = SSO_INTSN_EXE << 12 | rx->rx_grp;\n+\n+\t\trx->rx_irq = irq_create_mapping(NULL, sso_intsn);\n+\t\tif (!rx->rx_irq) {\n+\t\t\tnetdev_err(netdev,\n+\t\t\t\t \"ERROR: Couldn't map hwirq: %x\\n\", sso_intsn);\n+\t\t\tr = -EINVAL;\n+\t\t\tgoto err1;\n+\t\t}\n+\t\tr = request_irq(rx->rx_irq, octeon3_eth_rx_handler,\n+\t\t\t\tIRQ_TYPE_EDGE_RISING, netdev_name(netdev), rx);\n+\t\tif (r) {\n+\t\t\tnetdev_err(netdev, \"ERROR: Couldn't request irq: %d\\n\",\n+\t\t\t\t rx->rx_irq);\n+\t\t\tr = -ENOMEM;\n+\t\t\tgoto err2;\n+\t\t}\n+\n+\t\tocteon3_eth_gen_affinity(priv->node, &rx->rx_affinity_hint);\n+\t\tirq_set_affinity_hint(rx->rx_irq, &rx->rx_affinity_hint);\n+\n+\t\tnetif_napi_add(priv->netdev, &rx->napi,\n+\t\t\t octeon3_eth_napi, NAPI_POLL_WEIGHT);\n+\t\tnapi_enable(&rx->napi);\n+\n+\t\t/* Arm the irq. 
*/\n+\t\tocteon3_sso_irq_set(priv->node, rx->rx_grp, true);\n+\t}\n+\tocteon3_eth_replenish_rx(priv, priv->rx_buf_count);\n+\n+\treturn 0;\n+\n+err2:\n+\tirq_dispose_mapping(rx->rx_irq);\n+err1:\n+\tfor (i--; i >= 0; i--) {\n+\t\trx = priv->rx_cxt + i;\n+\t\tfree_irq(rx->rx_irq, rx);\n+\t\tirq_dispose_mapping(rx->rx_irq);\n+\t\tnapi_disable(&rx->napi);\n+\t\tnetif_napi_del(&rx->napi);\n+\t}\n+\n+\treturn r;\n+}\n+\n+static int octeon3_eth_bgx_ndo_open(struct net_device *netdev)\n+{\n+\tint\trc;\n+\n+\trc = octeon3_eth_common_ndo_open(netdev);\n+\tif (rc == 0)\n+\t\trc = bgx_port_enable(netdev);\n+\n+\treturn rc;\n+}\n+\n+static int octeon3_eth_common_ndo_stop(struct net_device *netdev)\n+{\n+\tstruct octeon3_ethernet *priv = netdev_priv(netdev);\n+\tvoid **w;\n+\tstruct sk_buff *skb;\n+\tstruct octeon3_rx *rx;\n+\tint i;\n+\n+\t/* Allow enough time for ingress in transit packets to be drained */\n+\tmsleep(20);\n+\n+\t/* Wait until sso has no more work for this interface */\n+\tfor (i = 0; i < priv->num_rx_cxt; i++) {\n+\t\trx = priv->rx_cxt + i;\n+\t\twhile (oct_csr_read(SSO_GRP_AQ_CNT(priv->node, rx->rx_grp)))\n+\t\t\tmsleep(20);\n+\t}\n+\n+\t/* Free the irq and napi context for each rx context */\n+\tfor (i = 0; i < priv->num_rx_cxt; i++) {\n+\t\trx = priv->rx_cxt + i;\n+\t\tocteon3_sso_irq_set(priv->node, rx->rx_grp, false);\n+\t\tirq_set_affinity_hint(rx->rx_irq, NULL);\n+\t\tfree_irq(rx->rx_irq, rx);\n+\t\tirq_dispose_mapping(rx->rx_irq);\n+\t\trx->rx_irq = 0;\n+\t\tnapi_disable(&rx->napi);\n+\t\tnetif_napi_del(&rx->napi);\n+\t}\n+\n+\t/* Free the packet buffers */\n+\tfor (;;) {\n+\t\tw = octeon_fpa3_alloc(priv->node, priv->pki_aura);\n+\t\tif (!w)\n+\t\t\tbreak;\n+\t\tskb = w[0];\n+\t\tdev_kfree_skb(skb);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_eth_bgx_ndo_stop(struct net_device *netdev)\n+{\n+\tint r;\n+\n+\tr = bgx_port_disable(netdev);\n+\tif (r)\n+\t\treturn r;\n+\n+\treturn octeon3_eth_common_ndo_stop(netdev);\n+}\n+\n+static inline u64 
build_pko_send_hdr_desc(struct sk_buff *skb)\n+{\n+\tu64\tsend_hdr = 0;\n+\tu8\tl4_hdr = 0;\n+\tu64\tchecksum_alg;\n+\n+\t/* See PKO_SEND_HDR_S in the HRM for the send header descriptor\n+\t * format.\n+\t */\n+#ifdef __LITTLE_ENDIAN\n+\tsend_hdr |= BIT(43);\n+#endif\n+\n+\tif (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\t/* Don't allocate to L2 */\n+\t\tsend_hdr |= BIT(42);\n+\t}\n+\n+\t/* Don't automatically free to FPA */\n+\tsend_hdr |= BIT(40);\n+\n+\tsend_hdr |= skb->len;\n+\n+\tif (skb->ip_summed != CHECKSUM_NONE &&\n+\t skb->ip_summed != CHECKSUM_UNNECESSARY) {\n+#ifndef BROKEN_SIMULATOR_CSUM\n+\t\tswitch (skb->protocol) {\n+\t\tcase htons(ETH_P_IP):\n+\t\t\tsend_hdr |= ETH_HLEN << 16;\n+\t\t\tsend_hdr |= BIT(45);\n+\t\t\tl4_hdr = ip_hdr(skb)->protocol;\n+\t\t\tsend_hdr |= (ETH_HLEN + (4 * ip_hdr(skb)->ihl)) << 24;\n+\t\t\tbreak;\n+\n+\t\tcase htons(ETH_P_IPV6):\n+\t\t\tl4_hdr = ipv6_hdr(skb)->nexthdr;\n+\t\t\tsend_hdr |= ETH_HLEN << 16;\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+#endif\n+\n+\t\tchecksum_alg = 1; /* UDP == 1 */\n+\t\tswitch (l4_hdr) {\n+\t\tcase IPPROTO_SCTP:\n+\t\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))\n+\t\t\t\tbreak;\n+\t\t\tchecksum_alg++; /* SCTP == 3 */\n+\t\t\t/* Fall through */\n+\t\tcase IPPROTO_TCP: /* TCP == 2 */\n+\t\t\tchecksum_alg++;\n+\t\t\t/* Fall through */\n+\t\tcase IPPROTO_UDP:\n+\t\t\tif (skb_transport_header_was_set(skb)) {\n+\t\t\t\tint l4ptr = skb_transport_header(skb) -\n+\t\t\t\t\tskb->data;\n+\t\t\t\tsend_hdr &= ~GENMASK_ULL(31, 24);\n+\t\t\t\tsend_hdr |= l4ptr << 24;\n+\t\t\t\tsend_hdr |= checksum_alg << 46;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn send_hdr;\n+}\n+\n+static inline u64 build_pko_send_ext_desc(struct sk_buff *skb)\n+{\n+\tu64\tsend_ext = 0;\n+\n+\t/* See PKO_SEND_EXT_S in the HRM for the send extended descriptor\n+\t * format.\n+\t */\n+\tskb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;\n+\tsend_ext |= (u64)PKO_SENDSUBDC_EXT << 
44;\n+\tsend_ext |= 1ull << 40;\n+\tsend_ext |= BIT(39);\n+\tsend_ext |= ETH_HLEN << 16;\n+\n+\treturn send_ext;\n+}\n+\n+static inline u64 build_pko_send_tso(struct sk_buff *skb, uint mtu)\n+{\n+\tu64\tsend_tso = 0;\n+\n+\t/* See PKO_SEND_TSO_S in the HRM for the send tso descriptor format */\n+\tsend_tso |= 12ull << 56;\n+\tsend_tso |= (u64)PKO_SENDSUBDC_TSO << 44;\n+\tsend_tso |= (skb_transport_offset(skb) + tcp_hdrlen(skb)) << 24;\n+\tsend_tso |= (mtu + ETH_HLEN) << 8;\n+\n+\treturn send_tso;\n+}\n+\n+static inline u64 build_pko_send_mem_sub(u64 addr)\n+{\n+\tu64\tsend_mem = 0;\n+\n+\t/* See PKO_SEND_MEM_S in the HRM for the send mem descriptor format */\n+\tsend_mem |= (u64)PKO_SENDSUBDC_MEM << 44;\n+\tsend_mem |= (u64)MEMDSZ_B64 << 60;\n+\tsend_mem |= (u64)MEMALG_SUB << 56;\n+\tsend_mem |= 1ull << 48;\n+\tsend_mem |= addr;\n+\n+\treturn send_mem;\n+}\n+\n+static inline u64 build_pko_send_mem_ts(u64 addr)\n+{\n+\tu64\tsend_mem = 0;\n+\n+\t/* See PKO_SEND_MEM_S in the HRM for the send mem descriptor format */\n+\tsend_mem |= 1ull << 62;\n+\tsend_mem |= (u64)PKO_SENDSUBDC_MEM << 44;\n+\tsend_mem |= (u64)MEMDSZ_B64 << 60;\n+\tsend_mem |= (u64)MEMALG_SETTSTMP << 56;\n+\tsend_mem |= addr;\n+\n+\treturn send_mem;\n+}\n+\n+static inline u64 build_pko_send_free(u64 addr)\n+{\n+\tu64\tsend_free = 0;\n+\n+\t/* See PKO_SEND_FREE_S in the HRM for the send free descriptor format */\n+\tsend_free |= (u64)PKO_SENDSUBDC_FREE << 44;\n+\tsend_free |= addr;\n+\n+\treturn send_free;\n+}\n+\n+static inline u64 build_pko_send_work(int grp, u64 addr)\n+{\n+\tu64\tsend_work = 0;\n+\n+\t/* See PKO_SEND_WORK_S in the HRM for the send work descriptor format */\n+\tsend_work |= (u64)PKO_SENDSUBDC_WORK << 44;\n+\tsend_work |= (u64)grp << 52;\n+\tsend_work |= 2ull << 50;\n+\tsend_work |= addr;\n+\n+\treturn send_work;\n+}\n+\n+static int octeon3_eth_ndo_start_xmit(struct sk_buff *skb,\n+\t\t\t\t struct net_device *netdev)\n+{\n+\tstruct sk_buff *skb_tmp;\n+\tstruct octeon3_ethernet *priv = 
netdev_priv(netdev);\n+\tu64 scr_off = LMTDMA_SCR_OFFSET;\n+\tu64 pko_send_desc;\n+\tu64 lmtdma_data;\n+\tu64 aq_cnt = 0;\n+\tstruct octeon3_ethernet_node *oen;\n+\tlong backlog;\n+\tint frag_count;\n+\tu64 head_len;\n+\tint i;\n+\tu64 *dma_addr;\n+\tvoid **work;\n+\tunsigned int mss;\n+\tint grp;\n+\n+\tfrag_count = 0;\n+\tif (skb_has_frag_list(skb))\n+\t\tskb_walk_frags(skb, skb_tmp)\n+\t\t\tfrag_count++;\n+\n+\t/* Stop the queue if pko or sso are not keeping up */\n+\toen = octeon3_eth_node + priv->node;\n+\taq_cnt = oct_csr_read(SSO_GRP_AQ_CNT(oen->node, oen->tx_complete_grp));\n+\taq_cnt &= GENMASK_ULL(32, 0);\n+\tbacklog = atomic64_inc_return(&priv->tx_backlog);\n+\tif (unlikely(backlog > MAX_TX_QUEUE_DEPTH || aq_cnt > 100000))\n+\t\tnetif_stop_queue(netdev);\n+\n+\t/* We have space for 11 segment pointers, If there will be\n+\t * more than that, we must linearize. The count is: 1 (base\n+\t * SKB) + frag_count + nr_frags.\n+\t */\n+\tif (unlikely(skb_shinfo(skb)->nr_frags + frag_count > 10)) {\n+\t\tif (unlikely(__skb_linearize(skb)))\n+\t\t\tgoto skip_xmit;\n+\t\tfrag_count = 0;\n+\t}\n+\n+\twork = (void **)skb->cb;\n+\twork[0] = netdev;\n+\twork[1] = NULL;\n+\n+\t/* Adjust the port statistics. 
*/\n+\tatomic64_inc(&priv->tx_packets);\n+\tatomic64_add(skb->len, &priv->tx_octets);\n+\n+\t/* Make sure packet data writes are committed before\n+\t * submitting the command below\n+\t */\n+\twmb();\n+\n+\t/* Build the pko command */\n+\tpko_send_desc = build_pko_send_hdr_desc(skb);\n+\tpreempt_disable();\n+\tscratch_write64(scr_off, pko_send_desc);\n+\tscr_off += sizeof(pko_send_desc);\n+\n+\t/* Request packet to be ptp timestamped */\n+\tif ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&\n+\t unlikely(priv->tx_timestamp_hw)) {\n+\t\tpko_send_desc = build_pko_send_ext_desc(skb);\n+\t\tscratch_write64(scr_off, pko_send_desc);\n+\t\tscr_off += sizeof(pko_send_desc);\n+\t}\n+\n+\t/* Add the tso descriptor if needed */\n+\tmss = skb_shinfo(skb)->gso_size;\n+\tif (unlikely(mss)) {\n+\t\tpko_send_desc = build_pko_send_tso(skb, netdev->mtu);\n+\t\tscratch_write64(scr_off, pko_send_desc);\n+\t\tscr_off += sizeof(pko_send_desc);\n+\t}\n+\n+\t/* Add a gather descriptor for each segment. 
See PKO_SEND_GATHER_S for\n+\t * the send gather descriptor format.\n+\t */\n+\tpko_send_desc = 0;\n+\tpko_send_desc |= (u64)PKO_SENDSUBDC_GATHER << 45;\n+\thead_len = skb_headlen(skb);\n+\tif (head_len > 0) {\n+\t\tpko_send_desc |= head_len << 48;\n+\t\tpko_send_desc |= virt_to_phys(skb->data);\n+\t\tscratch_write64(scr_off, pko_send_desc);\n+\t\tscr_off += sizeof(pko_send_desc);\n+\t}\n+\tfor (i = 1; i <= skb_shinfo(skb)->nr_frags; i++) {\n+\t\tstruct skb_frag_struct *fs = skb_shinfo(skb)->frags + i - 1;\n+\n+\t\tpko_send_desc &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(41, 0));\n+\t\tpko_send_desc |= (u64)fs->size << 48;\n+\t\tpko_send_desc |= virt_to_phys((u8 *)page_address(fs->page.p) + fs->page_offset);\n+\t\tscratch_write64(scr_off, pko_send_desc);\n+\t\tscr_off += sizeof(pko_send_desc);\n+\t}\n+\tskb_walk_frags(skb, skb_tmp) {\n+\t\tpko_send_desc &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(41, 0));\n+\t\tpko_send_desc |= (u64)skb_tmp->len << 48;\n+\t\tpko_send_desc |= virt_to_phys(skb_tmp->data);\n+\t\tscratch_write64(scr_off, pko_send_desc);\n+\t\tscr_off += sizeof(pko_send_desc);\n+\t}\n+\n+\t/* Subtract 1 from the tx_backlog. */\n+\tpko_send_desc = build_pko_send_mem_sub(virt_to_phys(&priv->tx_backlog));\n+\tscratch_write64(scr_off, pko_send_desc);\n+\tscr_off += sizeof(pko_send_desc);\n+\n+\t/* Write the ptp timestamp in the skb itself */\n+\tif ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&\n+\t unlikely(priv->tx_timestamp_hw)) {\n+\t\tpko_send_desc = build_pko_send_mem_ts(virt_to_phys(&work[1]));\n+\t\tscratch_write64(scr_off, pko_send_desc);\n+\t\tscr_off += sizeof(pko_send_desc);\n+\t}\n+\n+\t/* Send work when finished with the packet. 
*/\n+\tgrp = octeon3_eth_lgrp_to_ggrp(priv->node, priv->tx_complete_grp);\n+\tpko_send_desc = build_pko_send_work(grp, virt_to_phys(work));\n+\tscratch_write64(scr_off, pko_send_desc);\n+\tscr_off += sizeof(pko_send_desc);\n+\n+\t/* See PKO_SEND_DMA_S in the HRM for the lmtdam data format */\n+\tlmtdma_data = 0;\n+\tlmtdma_data |= (u64)(LMTDMA_SCR_OFFSET >> 3) << 56;\n+\tif (wait_pko_response)\n+\t\tlmtdma_data |= 1ull << 48;\n+\tlmtdma_data |= 0x51ull << 40;\n+\tlmtdma_data |= (u64)priv->node << 36;\n+\tlmtdma_data |= priv->pko_queue << 16;\n+\n+\tdma_addr = (u64 *)(LMTDMA_ORDERED_IO_ADDR | ((scr_off & 0x78) - 8));\n+\t*dma_addr = lmtdma_data;\n+\n+\tpreempt_enable();\n+\n+\tif (wait_pko_response) {\n+\t\tu64\tquery_rtn;\n+\n+\t\tCVMX_SYNCIOBDMA;\n+\n+\t\t/* See PKO_QUERY_RTN_S in the HRM for the return format */\n+\t\tquery_rtn = scratch_read64(LMTDMA_SCR_OFFSET);\n+\t\tquery_rtn >>= 60;\n+\t\tif (unlikely(query_rtn != PKO_DQSTATUS_PASS)) {\n+\t\t\tnetdev_err(netdev, \"PKO enqueue failed %llx\\n\",\n+\t\t\t\t (unsigned long long)query_rtn);\n+\t\t\tdev_kfree_skb_any(skb);\n+\t\t}\n+\t}\n+\n+\treturn NETDEV_TX_OK;\n+skip_xmit:\n+\tatomic64_inc(&priv->tx_dropped);\n+\tdev_kfree_skb_any(skb);\n+\treturn NETDEV_TX_OK;\n+}\n+\n+static void octeon3_eth_ndo_get_stats64(struct net_device *netdev,\n+\t\t\t\t\tstruct rtnl_link_stats64 *s)\n+{\n+\tstruct octeon3_ethernet *priv = netdev_priv(netdev);\n+\tu64 packets, octets, dropped;\n+\tu64 delta_packets, delta_octets, delta_dropped;\n+\n+\tspin_lock(&priv->stat_lock);\n+\n+\tocteon3_pki_get_stats(priv->node, priv->pknd, &packets, &octets, &dropped);\n+\n+\tdelta_packets = (packets - priv->last_packets) & ((1ull << 48) - 1);\n+\tdelta_octets = (octets - priv->last_octets) & ((1ull << 48) - 1);\n+\tdelta_dropped = (dropped - priv->last_dropped) & ((1ull << 48) - 1);\n+\n+\tpriv->last_packets = packets;\n+\tpriv->last_octets = octets;\n+\tpriv->last_dropped = 
dropped;\n+\n+\tspin_unlock(&priv->stat_lock);\n+\n+\tatomic64_add(delta_packets, &priv->rx_packets);\n+\tatomic64_add(delta_octets, &priv->rx_octets);\n+\tatomic64_add(delta_dropped, &priv->rx_dropped);\n+\n+\ts->rx_packets = atomic64_read(&priv->rx_packets);\n+\ts->rx_bytes = atomic64_read(&priv->rx_octets);\n+\ts->rx_dropped = atomic64_read(&priv->rx_dropped);\n+\ts->rx_errors = atomic64_read(&priv->rx_errors);\n+\ts->rx_length_errors = atomic64_read(&priv->rx_length_errors);\n+\ts->rx_crc_errors = atomic64_read(&priv->rx_crc_errors);\n+\n+\ts->tx_packets = atomic64_read(&priv->tx_packets);\n+\ts->tx_bytes = atomic64_read(&priv->tx_octets);\n+\ts->tx_dropped = atomic64_read(&priv->tx_dropped);\n+}\n+\n+static int octeon3_eth_set_mac_address(struct net_device *netdev, void *addr)\n+{\n+\tint r = eth_mac_addr(netdev, addr);\n+\n+\tif (r)\n+\t\treturn r;\n+\n+\tbgx_port_set_rx_filtering(netdev);\n+\n+\treturn 0;\n+}\n+\n+static u64 octeon3_cyclecounter_read(const struct cyclecounter *cc)\n+{\n+\tstruct octeon3_ethernet\t*priv;\n+\tu64\t\t\tcount;\n+\n+\tpriv = container_of(cc, struct octeon3_ethernet, cc);\n+\tcount = oct_csr_read(MIO_PTP_CLOCK_HI(priv->node));\n+\treturn count;\n+}\n+\n+static int octeon3_bgx_hwtstamp(struct net_device *netdev, int en)\n+{\n+\tstruct octeon3_ethernet\t\t*priv = netdev_priv(netdev);\n+\tu64\t\t\t\tdata;\n+\n+\tswitch (bgx_port_get_mode(priv->node, priv->interface, priv->index)) {\n+\tcase PORT_MODE_RGMII:\n+\tcase PORT_MODE_SGMII:\n+\t\tdata = oct_csr_read(BGX_GMP_GMI_RX_FRM_CTL(priv->node, priv->interface, priv->index));\n+\t\tif (en)\n+\t\t\tdata |= BIT(12);\n+\t\telse\n+\t\t\tdata &= ~BIT(12);\n+\t\toct_csr_write(data, BGX_GMP_GMI_RX_FRM_CTL(priv->node, priv->interface, priv->index));\n+\t\tbreak;\n+\n+\tcase PORT_MODE_XAUI:\n+\tcase PORT_MODE_RXAUI:\n+\tcase PORT_MODE_10G_KR:\n+\tcase PORT_MODE_XLAUI:\n+\tcase PORT_MODE_40G_KR4:\n+\tcase PORT_MODE_XFI:\n+\t\tdata = oct_csr_read(BGX_SMU_RX_FRM_CTL(priv->node, priv->interface, 
priv->index));\n+\t\tif (en)\n+\t\t\tdata |= BIT(12);\n+\t\telse\n+\t\t\tdata &= ~BIT(12);\n+\t\toct_csr_write(data, BGX_SMU_RX_FRM_CTL(priv->node, priv->interface, priv->index));\n+\t\tbreak;\n+\n+\tdefault:\n+\t\t/* No timestamp support*/\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_pki_hwtstamp(struct net_device *netdev, int en)\n+{\n+\tstruct octeon3_ethernet\t\t*priv = netdev_priv(netdev);\n+\tint\t\t\t\tskip = en ? 8 : 0;\n+\n+\tocteon3_pki_set_ptp_skip(priv->node, priv->pknd, skip);\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_ioctl_hwtstamp(struct net_device *netdev,\n+\t\t\t\t struct ifreq *rq, int cmd)\n+{\n+\tstruct octeon3_ethernet\t\t*priv = netdev_priv(netdev);\n+\tu64\t\t\t\tdata;\n+\tstruct hwtstamp_config\t\tconfig;\n+\tint\t\t\t\ten;\n+\n+\t/* The PTP block should be enabled */\n+\tdata = oct_csr_read(MIO_PTP_CLOCK_CFG(priv->node));\n+\tif (!(data & BIT(0))) {\n+\t\tnetdev_err(netdev, \"Error: PTP clock not enabled\\n\");\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\tif (copy_from_user(&config, rq->ifr_data, sizeof(config)))\n+\t\treturn -EFAULT;\n+\n+\tif (config.flags) /* reserved for future extensions */\n+\t\treturn -EINVAL;\n+\n+\tswitch (config.tx_type) {\n+\tcase HWTSTAMP_TX_OFF:\n+\t\tpriv->tx_timestamp_hw = 0;\n+\t\tbreak;\n+\tcase HWTSTAMP_TX_ON:\n+\t\tpriv->tx_timestamp_hw = 1;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -ERANGE;\n+\t}\n+\n+\tswitch (config.rx_filter) {\n+\tcase HWTSTAMP_FILTER_NONE:\n+\t\tpriv->rx_timestamp_hw = 0;\n+\t\ten = 0;\n+\t\tbreak;\n+\tcase HWTSTAMP_FILTER_ALL:\n+\tcase HWTSTAMP_FILTER_SOME:\n+\tcase HWTSTAMP_FILTER_PTP_V1_L4_EVENT:\n+\tcase HWTSTAMP_FILTER_PTP_V1_L4_SYNC:\n+\tcase HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:\n+\tcase HWTSTAMP_FILTER_PTP_V2_L4_EVENT:\n+\tcase HWTSTAMP_FILTER_PTP_V2_L4_SYNC:\n+\tcase HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:\n+\tcase HWTSTAMP_FILTER_PTP_V2_L2_EVENT:\n+\tcase HWTSTAMP_FILTER_PTP_V2_L2_SYNC:\n+\tcase HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:\n+\tcase 
HWTSTAMP_FILTER_PTP_V2_EVENT:\n+\tcase HWTSTAMP_FILTER_PTP_V2_SYNC:\n+\tcase HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:\n+\t\tpriv->rx_timestamp_hw = 1;\n+\t\ten = 1;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -ERANGE;\n+\t}\n+\n+\tocteon3_bgx_hwtstamp(netdev, en);\n+\tocteon3_pki_hwtstamp(netdev, en);\n+\n+\tpriv->cc.read = octeon3_cyclecounter_read;\n+\tpriv->cc.mask = CYCLECOUNTER_MASK(64);\n+\t/* Ptp counter is always in nsec */\n+\tpriv->cc.mult = 1;\n+\tpriv->cc.shift = 0;\n+\ttimecounter_init(&priv->tc, &priv->cc, ktime_to_ns(ktime_get_real()));\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_adjfreq(struct ptp_clock_info *ptp, s32 ppb)\n+{\n+\tstruct octeon3_ethernet\t*priv;\n+\tu64\t\t\tcomp;\n+\tu64\t\t\tdiff;\n+\tint\t\t\tneg_ppb = 0;\n+\n+\tpriv = container_of(ptp, struct octeon3_ethernet, ptp_info);\n+\n+\tif (ppb < 0) {\n+\t\tppb = -ppb;\n+\t\tneg_ppb = 1;\n+\t}\n+\n+\t/* The part per billion (ppb) is a delta from the base frequency */\n+\tcomp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();\n+\n+\tdiff = comp;\n+\tdiff *= ppb;\n+\tdiff = div_u64(diff, 1000000000ULL);\n+\n+\tcomp = neg_ppb ? 
comp - diff : comp + diff;\n+\n+\toct_csr_write(comp, MIO_PTP_CLOCK_COMP(priv->node));\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_adjtime(struct ptp_clock_info *ptp, s64 delta)\n+{\n+\tstruct octeon3_ethernet\t*priv;\n+\ts64\t\t\tnow;\n+\tunsigned long\t\tflags;\n+\n+\tpriv = container_of(ptp, struct octeon3_ethernet, ptp_info);\n+\n+\tspin_lock_irqsave(&priv->ptp_lock, flags);\n+\tnow = timecounter_read(&priv->tc);\n+\tnow += delta;\n+\ttimecounter_init(&priv->tc, &priv->cc, now);\n+\tspin_unlock_irqrestore(&priv->ptp_lock, flags);\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_gettime(struct ptp_clock_info *ptp, struct timespec *ts)\n+{\n+\tstruct octeon3_ethernet\t*priv;\n+\tu64\t\t\tns;\n+\tu32\t\t\tremainder;\n+\tunsigned long\t\tflags;\n+\n+\tpriv = container_of(ptp, struct octeon3_ethernet, ptp_info);\n+\n+\tspin_lock_irqsave(&priv->ptp_lock, flags);\n+\tns = timecounter_read(&priv->tc);\n+\tspin_unlock_irqrestore(&priv->ptp_lock, flags);\n+\tts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);\n+\tts->tv_nsec = remainder;\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_settime(struct ptp_clock_info *ptp,\n+\t\t\t const struct timespec *ts)\n+{\n+\tstruct octeon3_ethernet\t*priv;\n+\tu64\t\t\tns;\n+\tunsigned long\t\tflags;\n+\n+\tpriv = container_of(ptp, struct octeon3_ethernet, ptp_info);\n+\tns = timespec_to_ns(ts);\n+\n+\tspin_lock_irqsave(&priv->ptp_lock, flags);\n+\ttimecounter_init(&priv->tc, &priv->cc, ns);\n+\tspin_unlock_irqrestore(&priv->ptp_lock, flags);\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_enable(struct ptp_clock_info *ptp,\n+\t\t\t struct ptp_clock_request *rq, int on)\n+{\n+\treturn -EOPNOTSUPP;\n+}\n+\n+static int octeon3_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)\n+{\n+\tint rc;\n+\n+\tswitch (cmd) {\n+\tcase SIOCSHWTSTAMP:\n+\t\trc = octeon3_ioctl_hwtstamp(netdev, ifr, cmd);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\trc = bgx_port_do_ioctl(netdev, ifr, cmd);\n+\t\tbreak;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static const 
struct net_device_ops octeon3_eth_netdev_ops = {\n+\t.ndo_init\t\t= octeon3_eth_bgx_ndo_init,\n+\t.ndo_uninit\t\t= octeon3_eth_ndo_uninit,\n+\t.ndo_open\t\t= octeon3_eth_bgx_ndo_open,\n+\t.ndo_stop\t\t= octeon3_eth_bgx_ndo_stop,\n+\t.ndo_start_xmit\t\t= octeon3_eth_ndo_start_xmit,\n+\t.ndo_get_stats64\t= octeon3_eth_ndo_get_stats64,\n+\t.ndo_set_rx_mode\t= bgx_port_set_rx_filtering,\n+\t.ndo_set_mac_address\t= octeon3_eth_set_mac_address,\n+\t.ndo_change_mtu\t\t= octeon3_eth_ndo_change_mtu,\n+\t.ndo_do_ioctl\t\t= octeon3_ioctl,\n+};\n+\n+static int octeon3_eth_probe(struct platform_device *pdev)\n+{\n+\tstruct octeon3_ethernet *priv;\n+\tstruct net_device *netdev;\n+\tint r;\n+\n+\tstruct mac_platform_data *pd = dev_get_platdata(&pdev->dev);\n+\n+\tr = octeon3_eth_global_init(pd->numa_node, pdev);\n+\tif (r)\n+\t\treturn r;\n+\n+\tdev_info(&pdev->dev, \"Probing %d-%d:%d\\n\",\n+\t\t pd->numa_node, pd->interface, pd->port);\n+\tnetdev = alloc_etherdev(sizeof(struct octeon3_ethernet));\n+\tif (!netdev) {\n+\t\tdev_err(&pdev->dev, \"Failed to allocated ethernet device\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tSET_NETDEV_DEV(netdev, &pdev->dev);\n+\tdev_set_drvdata(&pdev->dev, netdev);\n+\n+\tif (pd->mac_type == BGX_MAC)\n+\t\tbgx_port_set_netdev(pdev->dev.parent, netdev);\n+\tpriv = netdev_priv(netdev);\n+\tpriv->netdev = netdev;\n+\tpriv->mac_type = pd->mac_type;\n+\tINIT_LIST_HEAD(&priv->list);\n+\tpriv->node = pd->numa_node;\n+\n+\tmutex_lock(&octeon3_eth_node[priv->node].device_list_lock);\n+\tlist_add_tail_rcu(&priv->list, &octeon3_eth_node[priv->node].device_list);\n+\tmutex_unlock(&octeon3_eth_node[priv->node].device_list_lock);\n+\n+\tpriv->index = pd->port;\n+\tpriv->interface = pd->interface;\n+\tspin_lock_init(&priv->stat_lock);\n+\n+\tif (pd->src_type == XCV)\n+\t\tsnprintf(netdev->name, IFNAMSIZ, \"rgmii%d\", pd->port);\n+\n+\tif (priv->mac_type == BGX_MAC)\n+\t\tnetdev->netdev_ops = &octeon3_eth_netdev_ops;\n+\n+\tif (register_netdev(netdev) < 0) 
{\n+\t\tdev_err(&pdev->dev, \"Failed to register ethernet device\\n\");\n+\t\tlist_del(&priv->list);\n+\t\tfree_netdev(netdev);\n+\t}\n+\n+\tspin_lock_init(&priv->ptp_lock);\n+\tpriv->ptp_info.owner = THIS_MODULE;\n+\tsnprintf(priv->ptp_info.name, 16, \"octeon3 ptp\");\n+\tpriv->ptp_info.max_adj = 250000000;\n+\tpriv->ptp_info.n_alarm = 0;\n+\tpriv->ptp_info.n_ext_ts = 0;\n+\tpriv->ptp_info.n_per_out = 0;\n+\tpriv->ptp_info.pps = 0;\n+\tpriv->ptp_info.adjfreq = octeon3_adjfreq;\n+\tpriv->ptp_info.adjtime = octeon3_adjtime;\n+\tpriv->ptp_info.gettime64 = octeon3_gettime;\n+\tpriv->ptp_info.settime64 = octeon3_settime;\n+\tpriv->ptp_info.enable = octeon3_enable;\n+\tpriv->ptp_clock = ptp_clock_register(&priv->ptp_info, &pdev->dev);\n+\n+\tnetdev_info(netdev, \"Registered\\n\");\n+\treturn 0;\n+}\n+\n+/**\n+ * octeon3_eth_global_exit - Free all the used resources and restore the\n+ *\t\t\t hardware to the default state.\n+ * @node: Node to free/reset.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+static int octeon3_eth_global_exit(int node)\n+{\n+\tstruct octeon3_ethernet_node\t*oen = octeon3_eth_node + node;\n+\tint\t\t\t\ti;\n+\n+\t/* Free the tx_complete irq */\n+\tocteon3_sso_irq_set(node, oen->tx_complete_grp, false);\n+\tirq_set_affinity_hint(oen->tx_irq, NULL);\n+\tfree_irq(oen->tx_irq, oen);\n+\tirq_dispose_mapping(oen->tx_irq);\n+\toen->tx_irq = 0;\n+\n+\t/* Stop the worker threads */\n+\tfor (i = 0; i < ARRAY_SIZE(oen->workers); i++)\n+\t\tkthread_stop(oen->workers[i].task);\n+\n+\t/* Shutdown pki */\n+\tocteon3_pki_shutdown(node);\n+\tocteon_fpa3_release_pool(node, oen->pki_packet_pool);\n+\tkfree(oen->pki_packet_pool_stack);\n+\n+\t/* Shutdown pko */\n+\tocteon3_pko_exit_global(node);\n+\tfor (;;) {\n+\t\tvoid **w;\n+\n+\t\tw = octeon_fpa3_alloc(node, oen->pko_aura);\n+\t\tif (!w)\n+\t\t\tbreak;\n+\t\tkmem_cache_free(octeon3_eth_sso_pko_cache, w);\n+\t}\n+\tocteon_fpa3_release_aura(node, 
oen->pko_aura);\n+\tocteon_fpa3_release_pool(node, oen->pko_pool);\n+\tkfree(oen->pko_pool_stack);\n+\n+\t/* Shutdown sso */\n+\tocteon3_sso_shutdown(node, oen->sso_aura);\n+\tocteon3_sso_free_grp(node, oen->tx_complete_grp);\n+\tfor (;;) {\n+\t\tvoid **w;\n+\n+\t\tw = octeon_fpa3_alloc(node, oen->sso_aura);\n+\t\tif (!w)\n+\t\t\tbreak;\n+\t\tkmem_cache_free(octeon3_eth_sso_pko_cache, w);\n+\t}\n+\tocteon_fpa3_release_aura(node, oen->sso_aura);\n+\tocteon_fpa3_release_pool(node, oen->sso_pool);\n+\tkfree(oen->sso_pool_stack);\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_eth_remove(struct platform_device *pdev)\n+{\n+\tstruct net_device\t\t*netdev = dev_get_drvdata(&pdev->dev);\n+\tstruct octeon3_ethernet\t\t*priv = netdev_priv(netdev);\n+\tint\t\t\t\tnode = priv->node;\n+\tstruct octeon3_ethernet_node\t*oen = octeon3_eth_node + node;\n+\tstruct mac_platform_data\t*pd = dev_get_platdata(&pdev->dev);\n+\n+\tptp_clock_unregister(priv->ptp_clock);\n+\tunregister_netdev(netdev);\n+\tif (pd->mac_type == BGX_MAC)\n+\t\tbgx_port_set_netdev(pdev->dev.parent, NULL);\n+\tdev_set_drvdata(&pdev->dev, NULL);\n+\n+\t/* Free all resources when there are no more devices */\n+\tmutex_lock(&octeon3_eth_init_mutex);\n+\tmutex_lock(&oen->device_list_lock);\n+\tlist_del_rcu(&priv->list);\n+\tif (oen->init_done && list_empty(&oen->device_list)) {\n+\t\toen->init_done = false;\n+\t\tocteon3_eth_global_exit(node);\n+\t}\n+\n+\tmutex_unlock(&oen->device_list_lock);\n+\tmutex_unlock(&octeon3_eth_init_mutex);\n+\tfree_netdev(netdev);\n+\n+\treturn 0;\n+}\n+\n+static void octeon3_eth_shutdown(struct platform_device *pdev)\n+{\n+\tocteon3_eth_remove(pdev);\n+}\n+\n+static struct platform_driver octeon3_eth_driver = {\n+\t.probe\t\t= octeon3_eth_probe,\n+\t.remove\t\t= octeon3_eth_remove,\n+\t.shutdown = octeon3_eth_shutdown,\n+\t.driver\t\t= {\n+\t\t.owner\t= THIS_MODULE,\n+\t\t.name\t= \"ethernet-mac-pki\",\n+\t},\n+};\n+\n+static int __init octeon3_eth_init(void)\n+{\n+\tif (rx_queues <= 
0)\n+\t\trx_queues = 1;\n+\tif (rx_queues > MAX_RX_QUEUES)\n+\t\trx_queues = MAX_RX_QUEUES;\n+\n+\treturn platform_driver_register(&octeon3_eth_driver);\n+}\n+module_init(octeon3_eth_init);\n+\n+static void __exit octeon3_eth_exit(void)\n+{\n+\tplatform_driver_unregister(&octeon3_eth_driver);\n+\n+\t/* Destroy the memory cache used by sso and pko */\n+\tkmem_cache_destroy(octeon3_eth_sso_pko_cache);\n+}\n+module_exit(octeon3_eth_exit);\n+\n+MODULE_LICENSE(\"GPL\");\n+MODULE_AUTHOR(\"Cavium, Inc. <support@caviumnetworks.com>\");\n+MODULE_DESCRIPTION(\"Cavium, Inc. PKI/PKO Ethernet driver.\");\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3-pki.c b/drivers/net/ethernet/cavium/octeon/octeon3-pki.c\nnew file mode 100644\nindex 000000000000..38bc294808e9\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3-pki.c\n@@ -0,0 +1,833 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General Public\n+ * License. See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#include <linux/module.h>\n+#include <linux/firmware.h>\n+\n+#include <asm/octeon/octeon.h>\n+\n+#include \"octeon3.h\"\n+\n+#define PKI_CLUSTER_FIRMWARE\t\t\"cavium/pki-cluster.bin\"\n+#define VERSION_LEN\t\t\t8\n+\n+#define MAX_CLUSTERS\t\t\t4\n+#define MAX_BANKS\t\t\t2\n+#define MAX_BANK_ENTRIES\t\t192\n+#define PKI_NUM_QPG_ENTRY\t\t2048\n+#define PKI_NUM_STYLE\t\t\t256\n+#define PKI_NUM_FINAL_STYLE\t\t64\n+#define MAX_PKNDS\t\t\t64\n+\n+/* Registers are accessed via xkphys */\n+#define PKI_BASE\t\t\t0x1180044000000ull\n+#define PKI_ADDR(node)\t\t\t(SET_XKPHYS + NODE_OFFSET(node) + \\\n+\t\t\t\t\t PKI_BASE)\n+\n+#define PKI_SFT_RST(n)\t\t\t(PKI_ADDR(n)\t\t + 0x000010)\n+#define PKI_BUF_CTL(n)\t\t\t(PKI_ADDR(n)\t\t + 0x000100)\n+#define PKI_STAT_CTL(n)\t\t\t(PKI_ADDR(n)\t\t + 0x000110)\n+#define PKI_ICG_CFG(n)\t\t\t(PKI_ADDR(n)\t\t + 0x00a000)\n+\n+#define CLUSTER_OFFSET(c)\t\t((c) << 
16)\n+#define CL_ADDR(n, c)\t\t\t(PKI_ADDR(n) + CLUSTER_OFFSET(c))\n+#define PKI_CL_ECC_CTL(n, c)\t\t(CL_ADDR(n, c)\t\t + 0x00c020)\n+\n+#define PKI_STYLE_BUF(n, s)\t\t(PKI_ADDR(n) + ((s) << 3) + 0x024000)\n+\n+#define PKI_LTYPE_MAP(n, l)\t\t(PKI_ADDR(n) + ((l) << 3) + 0x005000)\n+#define PKI_IMEM(n, i)\t\t\t(PKI_ADDR(n) + ((i) << 3) + 0x100000)\n+\n+#define PKI_CL_PKIND_CFG(n, c, p)\t(CL_ADDR(n, c) + ((p) << 8) + 0x300040)\n+#define PKI_CL_PKIND_STYLE(n, c, p)\t(CL_ADDR(n, c) + ((p) << 8) + 0x300048)\n+#define PKI_CL_PKIND_SKIP(n, c, p)\t(CL_ADDR(n, c) + ((p) << 8) + 0x300050)\n+#define PKI_CL_PKIND_L2_CUSTOM(n, c, p)\t(CL_ADDR(n, c) + ((p) << 8) + 0x300058)\n+#define PKI_CL_PKIND_LG_CUSTOM(n, c, p)\t(CL_ADDR(n, c) + ((p) << 8) + 0x300060)\n+\n+#define STYLE_OFFSET(s)\t\t\t((s) << 3)\n+#define STYLE_ADDR(n, c, s)\t\t(PKI_ADDR(n) + CLUSTER_OFFSET(c) + \\\n+\t\t\t\t\t STYLE_OFFSET(s))\n+#define PKI_CL_STYLE_CFG(n, c, s)\t(STYLE_ADDR(n, c, s)\t + 0x500000)\n+#define PKI_CL_STYLE_CFG2(n, c, s)\t(STYLE_ADDR(n, c, s)\t + 0x500800)\n+#define PKI_CLX_STYLEX_ALG(n, c, s)\t(STYLE_ADDR(n, c, s)\t + 0x501000)\n+\n+#define PCAM_OFFSET(bank)\t\t((bank) << 12)\n+#define PCAM_ENTRY_OFFSET(entry)\t((entry) << 3)\n+#define PCAM_ADDR(n, c, b, e)\t\t(PKI_ADDR(n) + CLUSTER_OFFSET(c) + \\\n+\t\t\t\t\t PCAM_OFFSET(b) + PCAM_ENTRY_OFFSET(e))\n+#define PKI_CL_PCAM_TERM(n, c, b, e)\t(PCAM_ADDR(n, c, b, e)\t + 0x700000)\n+#define PKI_CL_PCAM_MATCH(n, c, b, e)\t(PCAM_ADDR(n, c, b, e)\t + 0x704000)\n+#define PKI_CL_PCAM_ACTION(n, c, b, e)\t(PCAM_ADDR(n, c, b, e)\t + 0x708000)\n+\n+#define PKI_QPG_TBLX(n, i)\t\t(PKI_ADDR(n) + ((i) << 3) + 0x800000)\n+#define PKI_AURAX_CFG(n, a)\t\t(PKI_ADDR(n) + ((a) << 3) + 0x900000)\n+#define PKI_STATX_STAT0(n, p)\t\t(PKI_ADDR(n) + ((p) << 8) + 0xe00038)\n+#define PKI_STATX_STAT1(n, p)\t\t(PKI_ADDR(n) + ((p) << 8) + 0xe00040)\n+#define PKI_STATX_STAT3(n, p)\t\t(PKI_ADDR(n) + ((p) << 8) + 0xe00050)\n+\n+enum pcam_term {\n+\tNONE\t\t= 0x0,\n+\tL2_CUSTOM\t= 
0x2,\n+\tHIGIGD\t\t= 0x4,\n+\tHIGIG\t\t= 0x5,\n+\tSMACH\t\t= 0x8,\n+\tSMACL\t\t= 0x9,\n+\tDMACH\t\t= 0xa,\n+\tDMACL\t\t= 0xb,\n+\tGLORT\t\t= 0x12,\n+\tDSA\t\t= 0x13,\n+\tETHTYPE0\t= 0x18,\n+\tETHTYPE1\t= 0x19,\n+\tETHTYPE2\t= 0x1a,\n+\tETHTYPE3\t= 0x1b,\n+\tMPLS0\t\t= 0x1e,\n+\tL3_SIPHH\t= 0x1f,\n+\tL3_SIPMH\t= 0x20,\n+\tL3_SIPML\t= 0x21,\n+\tL3_SIPLL\t= 0x22,\n+\tL3_FLAGS\t= 0x23,\n+\tL3_DIPHH\t= 0x24,\n+\tL3_DIPMH\t= 0x25,\n+\tL3_DIPML\t= 0x26,\n+\tL3_DIPLL\t= 0x27,\n+\tLD_VNI\t\t= 0x28,\n+\tIL3_FLAGS\t= 0x2b,\n+\tLF_SPI\t\t= 0x2e,\n+\tL4_SPORT\t= 0x2f,\n+\tL4_PORT\t\t= 0x30,\n+\tLG_CUSTOM\t= 0x39\n+};\n+\n+enum pki_ltype {\n+\tLTYPE_NONE\t\t= 0x00,\n+\tLTYPE_ENET\t\t= 0x01,\n+\tLTYPE_VLAN\t\t= 0x02,\n+\tLTYPE_SNAP_PAYLD\t= 0x05,\n+\tLTYPE_ARP\t\t= 0x06,\n+\tLTYPE_RARP\t\t= 0x07,\n+\tLTYPE_IP4\t\t= 0x08,\n+\tLTYPE_IP4_OPT\t\t= 0x09,\n+\tLTYPE_IP6\t\t= 0x0a,\n+\tLTYPE_IP6_OPT\t\t= 0x0b,\n+\tLTYPE_IPSEC_ESP\t\t= 0x0c,\n+\tLTYPE_IPFRAG\t\t= 0x0d,\n+\tLTYPE_IPCOMP\t\t= 0x0e,\n+\tLTYPE_TCP\t\t= 0x10,\n+\tLTYPE_UDP\t\t= 0x11,\n+\tLTYPE_SCTP\t\t= 0x12,\n+\tLTYPE_UDP_VXLAN\t\t= 0x13,\n+\tLTYPE_GRE\t\t= 0x14,\n+\tLTYPE_NVGRE\t\t= 0x15,\n+\tLTYPE_GTP\t\t= 0x16,\n+\tLTYPE_UDP_GENEVE\t= 0x17,\n+\tLTYPE_SW28\t\t= 0x1c,\n+\tLTYPE_SW29\t\t= 0x1d,\n+\tLTYPE_SW30\t\t= 0x1e,\n+\tLTYPE_SW31\t\t= 0x1f\n+};\n+\n+enum pki_beltype {\n+\tBELTYPE_NONE\t= 0x00,\n+\tBELTYPE_MISC\t= 0x01,\n+\tBELTYPE_IP4\t= 0x02,\n+\tBELTYPE_IP6\t= 0x03,\n+\tBELTYPE_TCP\t= 0x04,\n+\tBELTYPE_UDP\t= 0x05,\n+\tBELTYPE_SCTP\t= 0x06,\n+\tBELTYPE_SNAP\t= 0x07\n+};\n+\n+struct ltype_beltype {\n+\tenum pki_ltype\t\tltype;\n+\tenum pki_beltype\tbeltype;\n+};\n+\n+/**\n+ * struct pcam_term_info - Describes a term to configure in the pcam.\n+ * @term: Identifies the term to configure.\n+ * @term_mask: Specifies don't cares in the term.\n+ * @style: Style to compare.\n+ * @style_mask: Specifies don't cares in the style.\n+ * @data: Data to compare.\n+ * @data_mask: Specifies don't cares in the data.\n+ */\n+struct 
pcam_term_info {\n+\tu8\tterm;\n+\tu8\tterm_mask;\n+\tu8\tstyle;\n+\tu8\tstyle_mask;\n+\tu32\tdata;\n+\tu32\tdata_mask;\n+};\n+\n+/**\n+ * struct fw_hdr - Describes the firmware.\n+ * @version: Firmware version.\n+ * @size: Size of the data in bytes.\n+ * @data: Actual firmware data.\n+ */\n+struct fw_hdr {\n+\tchar\tversion[VERSION_LEN];\n+\tu64\tsize;\n+\tu64\tdata[];\n+};\n+\n+static struct ltype_beltype\tdflt_ltype_config[] = {\n+\t{ LTYPE_NONE,\t\tBELTYPE_NONE },\n+\t{ LTYPE_ENET,\t\tBELTYPE_MISC },\n+\t{ LTYPE_VLAN,\t\tBELTYPE_MISC },\n+\t{ LTYPE_SNAP_PAYLD,\tBELTYPE_MISC },\n+\t{ LTYPE_ARP,\t\tBELTYPE_MISC },\n+\t{ LTYPE_RARP,\t\tBELTYPE_MISC },\n+\t{ LTYPE_IP4,\t\tBELTYPE_IP4 },\n+\t{ LTYPE_IP4_OPT,\tBELTYPE_IP4 },\n+\t{ LTYPE_IP6,\t\tBELTYPE_IP6 },\n+\t{ LTYPE_IP6_OPT,\tBELTYPE_IP6 },\n+\t{ LTYPE_IPSEC_ESP,\tBELTYPE_MISC },\n+\t{ LTYPE_IPFRAG,\t\tBELTYPE_MISC },\n+\t{ LTYPE_IPCOMP,\t\tBELTYPE_MISC },\n+\t{ LTYPE_TCP,\t\tBELTYPE_TCP },\n+\t{ LTYPE_UDP,\t\tBELTYPE_UDP },\n+\t{ LTYPE_SCTP,\t\tBELTYPE_SCTP },\n+\t{ LTYPE_UDP_VXLAN,\tBELTYPE_UDP },\n+\t{ LTYPE_GRE,\t\tBELTYPE_MISC },\n+\t{ LTYPE_NVGRE,\t\tBELTYPE_MISC },\n+\t{ LTYPE_GTP,\t\tBELTYPE_MISC },\n+\t{ LTYPE_UDP_GENEVE,\tBELTYPE_UDP },\n+\t{ LTYPE_SW28,\t\tBELTYPE_MISC },\n+\t{ LTYPE_SW29,\t\tBELTYPE_MISC },\n+\t{ LTYPE_SW30,\t\tBELTYPE_MISC },\n+\t{ LTYPE_SW31,\t\tBELTYPE_MISC }\n+};\n+\n+static int get_num_clusters(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 2;\n+\treturn 4;\n+}\n+\n+static int octeon3_pki_pcam_alloc_entry(int\tnode,\n+\t\t\t\t\tint\tentry,\n+\t\t\t\t\tint\tbank)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\tint\t\t\t\tnum_clusters;\n+\tint\t\t\t\trc;\n+\tint\t\t\t\ti;\n+\n+\t/* Allocate a pcam entry for cluster0*/\n+\tstrncpy((char *)&tag.lo, \"cvm_pcam\", 8);\n+\tsnprintf(buf, 16, \"_%d%d%d....\", node, 0, bank);\n+\tmemcpy(&tag.hi, buf, 8);\n+\n+\tres_mgr_create_resource(tag, MAX_BANK_ENTRIES);\n+\trc = 
res_mgr_alloc(tag, entry, false);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n+\tentry = rc;\n+\n+\t/* Need to allocate entries for all clusters as se code needs it */\n+\tnum_clusters = get_num_clusters();\n+\tfor (i = 1; i < num_clusters; i++) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pcam\", 8);\n+\t\tsnprintf(buf, 16, \"_%d%d%d....\", node, i, bank);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tres_mgr_create_resource(tag, MAX_BANK_ENTRIES);\n+\t\trc = res_mgr_alloc(tag, entry, false);\n+\t\tif (rc < 0) {\n+\t\t\tint\tj;\n+\n+\t\t\tpr_err(\"octeon3-pki: Failed to allocate pcam entry\\n\");\n+\t\t\t/* Undo whatever we did */\n+\t\t\tfor (j = 0; j < i; j++) {\n+\t\t\t\tstrncpy((char *)&tag.lo, \"cvm_pcam\", 8);\n+\t\t\t\tsnprintf(buf, 16, \"_%d%d%d....\", node, j, bank);\n+\t\t\t\tmemcpy(&tag.hi, buf, 8);\n+\t\t\t\tres_mgr_free(tag, entry);\n+\t\t\t}\n+\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn entry;\n+}\n+\n+static int octeon3_pki_pcam_write_entry(int\t\t\tnode,\n+\t\t\t\t\tstruct pcam_term_info\t*term_info)\n+{\n+\tint\tbank;\n+\tint\tentry;\n+\tint\tnum_clusters;\n+\tu64\tterm;\n+\tu64\tmatch;\n+\tu64\taction;\n+\tint\ti;\n+\n+\t/* Bit 0 of the pcam term determines the bank to use */\n+\tbank = term_info->term & 1;\n+\n+\t/* Allocate a pcam entry */\n+\tentry = octeon3_pki_pcam_alloc_entry(node, -1, bank);\n+\tif (entry < 0)\n+\t\treturn entry;\n+\n+\tterm = 1ull << 63;\n+\tterm |= (u64)(term_info->term & term_info->term_mask) << 40;\n+\tterm |= (~term_info->term & term_info->term_mask) << 8;\n+\tterm |= (u64)(term_info->style & term_info->style_mask) << 32;\n+\tterm |= ~term_info->style & term_info->style_mask;\n+\n+\tmatch = (u64)(term_info->data & term_info->data_mask) << 32;\n+\tmatch |= ~term_info->data & term_info->data_mask;\n+\n+\taction = 0;\n+\tif (term_info->term >= ETHTYPE0 && term_info->term <= ETHTYPE3) {\n+\t\taction |= 2 << 8;\n+\t\taction |= 4;\n+\t}\n+\n+\t/* Must write the term to all clusters */\n+\tnum_clusters = get_num_clusters();\n+\tfor (i = 
0; i < num_clusters; i++) {\n+\t\toct_csr_write(0, PKI_CL_PCAM_TERM(node, i, bank, entry));\n+\t\toct_csr_write(match, PKI_CL_PCAM_MATCH(node, i, bank, entry));\n+\t\toct_csr_write(action, PKI_CL_PCAM_ACTION(node, i, bank, entry));\n+\t\toct_csr_write(term, PKI_CL_PCAM_TERM(node, i, bank, entry));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int octeon3_pki_alloc_qpg_entry(int node)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\tint\t\t\t\tentry;\n+\n+\t/* Allocate a qpg entry */\n+\tstrncpy((char *)&tag.lo, \"cvm_qpge\", 8);\n+\tsnprintf(buf, 16, \"t_%d.....\", node);\n+\tmemcpy(&tag.hi, buf, 8);\n+\n+\tres_mgr_create_resource(tag, PKI_NUM_QPG_ENTRY);\n+\tentry = res_mgr_alloc(tag, -1, false);\n+\tif (entry < 0)\n+\t\tpr_err(\"octeon3-pki: Failed to allocate qpg entry\");\n+\n+\treturn entry;\n+}\n+\n+static int octeon3_pki_alloc_style(int node)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\tint\t\t\t\tentry;\n+\n+\t/* Allocate a style entry */\n+\tstrncpy((char *)&tag.lo, \"cvm_styl\", 8);\n+\tsnprintf(buf, 16, \"e_%d.....\", node);\n+\tmemcpy(&tag.hi, buf, 8);\n+\n+\tres_mgr_create_resource(tag, PKI_NUM_STYLE);\n+\tentry = res_mgr_alloc(tag, -1, false);\n+\tif (entry < 0)\n+\t\tpr_err(\"octeon3-pki: Failed to allocate style\");\n+\n+\treturn entry;\n+}\n+\n+int octeon3_pki_set_ptp_skip(int node, int pknd, int skip)\n+{\n+\tu64\tdata;\n+\tint\tnum_clusters;\n+\tu64\ti;\n+\n+\tnum_clusters = get_num_clusters();\n+\tfor (i = 0; i < num_clusters; i++) {\n+\t\tdata = oct_csr_read(PKI_CL_PKIND_SKIP(node, i, pknd));\n+\t\tdata &= ~(GENMASK_ULL(15, 8) | GENMASK_ULL(7, 0));\n+\t\tdata |= (skip << 8) | skip;\n+\t\toct_csr_write(data, PKI_CL_PKIND_SKIP(node, i, pknd));\n+\n+\t\tdata = oct_csr_read(PKI_CL_PKIND_L2_CUSTOM(node, i, pknd));\n+\t\tdata &= ~GENMASK_ULL(7, 0);\n+\t\tdata |= skip;\n+\t\toct_csr_write(data, PKI_CL_PKIND_L2_CUSTOM(node, i, pknd));\n+\t}\n+\n+\treturn 
0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_set_ptp_skip);\n+\n+/**\n+ * octeon3_pki_get_stats - Get the statistics for a given pknd (port).\n+ * @node: Node to get statistics for..\n+ * @pknd: Pknd to get statistis for.\n+ * @packets: Updated with the number of packets received.\n+ * @octets: Updated with the number of octets received.\n+ * @dropped: Updated with the number of dropped packets.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_get_stats(int\tnode,\n+\t\t\t int\tpknd,\n+\t\t\t u64\t*packets,\n+\t\t\t u64\t*octets,\n+\t\t\t u64\t*dropped)\n+{\n+\t/* PKI-20775, must read until not all ones. */\n+\tdo {\n+\t\t*packets = oct_csr_read(PKI_STATX_STAT0(node, pknd));\n+\t} while (*packets == 0xffffffffffffffffull);\n+\n+\tdo {\n+\t\t*octets = oct_csr_read(PKI_STATX_STAT1(node, pknd));\n+\t} while (*octets == 0xffffffffffffffffull);\n+\n+\tdo {\n+\t\t*dropped = oct_csr_read(PKI_STATX_STAT3(node, pknd));\n+\t} while (*dropped == 0xffffffffffffffffull);\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_get_stats);\n+\n+/**\n+ * octeon3_pki_port_init - Initialize a port.\n+ * @node: Node port is using.\n+ * @aura: Aura to use for packet buffers.\n+ * @grp: SSO group packets will be queued up for.\n+ * @skip: Extra bytes to skip before packet data.\n+ * @mb_size: Size of packet buffers.\n+ * @pknd: Port kind assigned to the port.\n+ * @num_rx_cxt: Number of sso groups used by the port.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_port_init(int\tnode,\n+\t\t\t int\taura,\n+\t\t\t int\tgrp,\n+\t\t\t int\tskip,\n+\t\t\t int\tmb_size,\n+\t\t\t int\tpknd,\n+\t\t\t int\tnum_rx_cxt)\n+{\n+\tint\tqpg_entry;\n+\tint\tstyle;\n+\tu64\tdata;\n+\tint\tnum_clusters;\n+\tu64\ti;\n+\n+\t/* Allocate and configure a qpg table entry for the port's group */\n+\ti = 0;\n+\twhile ((num_rx_cxt & (1 << i)) == 0)\n+\t\ti++;\n+\tqpg_entry = octeon3_pki_alloc_qpg_entry(node);\n+\tdata = 
oct_csr_read(PKI_QPG_TBLX(node, qpg_entry));\n+\tdata &= ~(GENMASK_ULL(59, 48) | GENMASK_ULL(47, 45) |\n+\t\t GENMASK_ULL(41, 32) | GENMASK_ULL(31, 29) |\n+\t\t GENMASK_ULL(25, 16) | GENMASK_ULL(9, 0));\n+\tdata |= i << 45;\n+\tdata |= ((u64)((node << 8) | grp) << 32);\n+\tdata |= i << 29;\n+\tdata |= (((node << 8) | grp) << 16);\n+\tdata |= aura;\n+\toct_csr_write(data, PKI_QPG_TBLX(node, qpg_entry));\n+\n+\t/* Allocate a style for the port */\n+\tstyle = octeon3_pki_alloc_style(node);\n+\n+\t/* Map the qpg table entry to the style */\n+\tnum_clusters = get_num_clusters();\n+\tfor (i = 0; i < num_clusters; i++) {\n+\t\tdata = BIT(29) | BIT(22) | qpg_entry;\n+\t\toct_csr_write(data, PKI_CL_STYLE_CFG(node, i, style));\n+\n+\t\t/* Specify the tag generation rules and checksum to use */\n+\t\toct_csr_write(0xfff49f, PKI_CL_STYLE_CFG2(node, i, style));\n+\n+\t\tdata = BIT(31);\n+\t\toct_csr_write(data, PKI_CLX_STYLEX_ALG(node, i, style));\n+\t}\n+\n+\t/* Set the style's buffer size and skips:\n+\t *\tEvery buffer has 128 bytes reserved for Linux.\n+\t *\tThe first buffer must also skip the wqe (40 bytes).\n+\t *\tSrio also requires skipping its header (skip)\n+\t */\n+\tdata = 1ull << 28;\n+\tdata |= ((128 + 40 + skip) / 8) << 22;\n+\tdata |= (128 / 8) << 16;\n+\tdata |= (mb_size & ~0xf) / 8;\n+\toct_csr_write(data, PKI_STYLE_BUF(node, style));\n+\n+\t/* Assign the initial style to the port via the pknd */\n+\tfor (i = 0; i < num_clusters; i++) {\n+\t\tdata = oct_csr_read(PKI_CL_PKIND_STYLE(node, i, pknd));\n+\t\tdata &= ~GENMASK_ULL(7, 0);\n+\t\tdata |= style;\n+\t\toct_csr_write(data, PKI_CL_PKIND_STYLE(node, i, pknd));\n+\t}\n+\n+\t/* Enable red */\n+\tdata = BIT(18);\n+\toct_csr_write(data, PKI_AURAX_CFG(node, aura));\n+\n+\t/* Clear statistic counters */\n+\toct_csr_write(0, PKI_STATX_STAT0(node, pknd));\n+\toct_csr_write(0, PKI_STATX_STAT1(node, pknd));\n+\toct_csr_write(0, PKI_STATX_STAT3(node, pknd));\n+\n+\treturn 
0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_port_init);\n+\n+/**\n+ * octeon3_pki_port_shutdown - Release all the resources used by a port.\n+ * @node: Node port is on.\n+ * @pknd: Pknd assigned to the port.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_port_shutdown(int node, int pknd)\n+{\n+\t/* Nothing at the moment */\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_port_shutdown);\n+\n+/**\n+ * octeon3_pki_cluster_init - Loads the cluster firmware into the pki clusters.\n+ * @node: Node to configure.\n+ * @pdev: Device requesting the firmware.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_cluster_init(int node, struct platform_device *pdev)\n+{\n+\tconst struct firmware\t*pki_fw;\n+\tconst struct fw_hdr\t*hdr;\n+\tconst u64\t\t*data;\n+\tint\t\t\ti;\n+\tint\t\t\trc;\n+\n+\trc = request_firmware(&pki_fw, PKI_CLUSTER_FIRMWARE, &pdev->dev);\n+\tif (rc) {\n+\t\tdev_err(&pdev->dev, \"octeon3-pki: Failed to load %s error=%d\\n\",\n+\t\t\tPKI_CLUSTER_FIRMWARE, rc);\n+\t\treturn rc;\n+\t}\n+\n+\t/* Verify the firmware is valid */\n+\thdr = (const struct fw_hdr *)pki_fw->data;\n+\tif ((pki_fw->size - sizeof(const struct fw_hdr) != hdr->size) ||\n+\t hdr->size % 8) {\n+\t\tdev_err(&pdev->dev, (\"octeon3-pki: Corrupted PKI firmware\\n\"));\n+\t\tgoto err;\n+\t}\n+\n+\tdev_info(&pdev->dev, \"octeon3-pki: Loading PKI firmware %s\\n\",\n+\t\t hdr->version);\n+\tdata = hdr->data;\n+\tfor (i = 0; i < hdr->size / 8; i++) {\n+\t\toct_csr_write(cpu_to_be64(*data), PKI_IMEM(node, i));\n+\t\tdata++;\n+\t}\n+\n+err:\n+\trelease_firmware(pki_fw);\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_cluster_init);\n+\n+/**\n+ * octeon3_pki_vlan_init - Configures the pcam to recognize the vlan ethtypes.\n+ * @node:\t\t\tNode to configure.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_vlan_init(int node)\n+{\n+\tu64\tdata;\n+\tint\ti;\n+\tint\trc;\n+\n+\t/* 
PKI-20858 */\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\tfor (i = 0; i < 4; i++) {\n+\t\t\tdata = oct_csr_read(PKI_CL_ECC_CTL(node, i));\n+\t\t\tdata &= ~BIT(63);\n+\t\t\tdata |= BIT(4) | BIT(3);\n+\t\t\toct_csr_write(data, PKI_CL_ECC_CTL(node, i));\n+\t\t}\n+\t}\n+\n+\t/* Configure the pcam ethtype0 and ethtype1 terms */\n+\tfor (i = ETHTYPE0; i <= ETHTYPE1; i++) {\n+\t\tstruct pcam_term_info\tterm_info;\n+\n+\t\t/* Term for 0x8100 ethtype */\n+\t\tterm_info.term = i;\n+\t\tterm_info.term_mask = 0xfd;\n+\t\tterm_info.style = 0;\n+\t\tterm_info.style_mask = 0;\n+\t\tterm_info.data = 0x81000000;\n+\t\tterm_info.data_mask = 0xffff0000;\n+\t\trc = octeon3_pki_pcam_write_entry(node, &term_info);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\t/* Term for 0x88a8 ethtype */\n+\t\tterm_info.data = 0x88a80000;\n+\t\trc = octeon3_pki_pcam_write_entry(node, &term_info);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\t/* Term for 0x9200 ethtype */\n+\t\tterm_info.data = 0x92000000;\n+\t\trc = octeon3_pki_pcam_write_entry(node, &term_info);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\t/* Term for 0x9100 ethtype */\n+\t\tterm_info.data = 0x91000000;\n+\t\trc = octeon3_pki_pcam_write_entry(node, &term_info);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_vlan_init);\n+\n+/**\n+ * octeon3_pki_ltype_init - Configures the pki layer types.\n+ * @node:\t\t\tNode to configure.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_ltype_init(int node)\n+{\n+\tenum pki_ltype\tltype;\n+\tu64\t\tdata;\n+\tint\t\ti;\n+\n+\tfor (i = 0; i < ARRAY_SIZE(dflt_ltype_config); i++) {\n+\t\tltype = dflt_ltype_config[i].ltype;\n+\t\tdata = oct_csr_read(PKI_LTYPE_MAP(node, ltype));\n+\t\tdata &= ~GENMASK_ULL(2, 0);\n+\t\tdata |= dflt_ltype_config[i].beltype;\n+\t\toct_csr_write(data, PKI_LTYPE_MAP(node, ltype));\n+\t}\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_ltype_init);\n+\n+int octeon3_pki_srio_init(int node, int 
pknd)\n+{\n+\tu64\tdata;\n+\tint\tnum_clusters;\n+\tint\tstyle;\n+\tint\ti;\n+\n+\tnum_clusters = get_num_clusters();\n+\tfor (i = 0; i < num_clusters; i++) {\n+\t\tdata = oct_csr_read(PKI_CL_PKIND_STYLE(node, i, pknd));\n+\t\tstyle = data & GENMASK_ULL(7, 0);\n+\t\tdata &= ~GENMASK_ULL(14, 8);\n+\t\toct_csr_write(data, PKI_CL_PKIND_STYLE(node, i, pknd));\n+\n+\t\t/* Disable packet length errors and fcs */\n+\t\tdata = oct_csr_read(PKI_CL_STYLE_CFG(node, i, style));\n+\t\tdata &= ~(BIT(29) | BIT(26) | BIT(25) | BIT(23) | BIT(22));\n+\t\toct_csr_write(data, PKI_CL_STYLE_CFG(node, i, style));\n+\n+\t\t/* Packets have no fcs */\n+\t\tdata = oct_csr_read(PKI_CL_PKIND_CFG(node, i, pknd));\n+\t\tdata &= ~BIT(7);\n+\t\toct_csr_write(data, PKI_CL_PKIND_CFG(node, i, pknd));\n+\n+\t\t/* Skip the srio header and the INST_HDR_S data */\n+\t\tdata = oct_csr_read(PKI_CL_PKIND_SKIP(node, i, pknd));\n+\t\tdata &= ~(GENMASK_ULL(15, 8) | GENMASK_ULL(7, 0));\n+\t\tdata |= (16 << 8) | 16;\n+\t\toct_csr_write(data, PKI_CL_PKIND_SKIP(node, i, pknd));\n+\n+\t\t/* Exclude port number from qpg */\n+\t\tdata = oct_csr_read(PKI_CLX_STYLEX_ALG(node, i, style));\n+\t\tdata &= ~GENMASK_ULL(20, 17);\n+\t\toct_csr_write(data, PKI_CLX_STYLEX_ALG(node, i, style));\n+\t}\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_srio_init);\n+\n+/**\n+ * octeon3_pki_enable - Enable the pki.\n+ * @node: Node to configure.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_pki_enable(int node)\n+{\n+\tu64\tdata;\n+\tint\ttimeout;\n+\n+\t/* Enable backpressure */\n+\tdata = oct_csr_read(PKI_BUF_CTL(node));\n+\tdata |= BIT(2);\n+\toct_csr_write(data, PKI_BUF_CTL(node));\n+\n+\t/* Enable cluster parsing */\n+\tdata = oct_csr_read(PKI_ICG_CFG(node));\n+\tdata |= BIT(24);\n+\toct_csr_write(data, PKI_ICG_CFG(node));\n+\n+\t/* Wait until the pki is out of reset */\n+\ttimeout = 10000;\n+\tdo {\n+\t\tdata = oct_csr_read(PKI_SFT_RST(node));\n+\t\tif (!(data & 
BIT(63)))\n+\t\t\tbreak;\n+\t\ttimeout--;\n+\t\tudelay(1);\n+\t} while (timeout);\n+\tif (!timeout) {\n+\t\tpr_err(\"octeon3-pki: timeout waiting for reset\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Enable the pki */\n+\tdata = oct_csr_read(PKI_BUF_CTL(node));\n+\tdata |= BIT(0);\n+\toct_csr_write(data, PKI_BUF_CTL(node));\n+\n+\t/* Statistics are kept per pkind */\n+\toct_csr_write(0, PKI_STAT_CTL(node));\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pki_enable);\n+\n+void octeon3_pki_shutdown(int node)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\tu64\t\t\t\tdata;\n+\tint\t\t\t\ttimeout;\n+\tint\t\t\t\ti;\n+\tint\t\t\t\tj;\n+\tint\t\t\t\tk;\n+\n+\t/* Disable the pki */\n+\tdata = oct_csr_read(PKI_BUF_CTL(node));\n+\tif (data & BIT(0)) {\n+\t\tdata &= ~BIT(0);\n+\t\toct_csr_write(data, PKI_BUF_CTL(node));\n+\n+\t\t/* Wait until the pki has finished processing packets */\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tdata = oct_csr_read(PKI_SFT_RST(node));\n+\t\t\tif (data & BIT(32))\n+\t\t\t\tbreak;\n+\t\t\ttimeout--;\n+\t\t\tudelay(1);\n+\t\t} while (timeout);\n+\t\tif (!timeout)\n+\t\t\tpr_warn(\"octeon3_pki: disable timeout\\n\");\n+\t}\n+\n+\t/* Free all prefetched fpa buffers back to the fpa */\n+\tdata = oct_csr_read(PKI_BUF_CTL(node));\n+\tdata |= BIT(5) | BIT(9);\n+\toct_csr_write(data, PKI_BUF_CTL(node));\n+\t/* Dummy read to get the register write to take effect */\n+\tdata = oct_csr_read(PKI_BUF_CTL(node));\n+\n+\t/* Now we can reset the pki */\n+\tdata = oct_csr_read(PKI_SFT_RST(node));\n+\tdata |= BIT(0);\n+\toct_csr_write(data, PKI_SFT_RST(node));\n+\ttimeout = 10000;\n+\tdo {\n+\t\tdata = oct_csr_read(PKI_SFT_RST(node));\n+\t\tif ((data & BIT(63)) == 0)\n+\t\t\tbreak;\n+\t\ttimeout--;\n+\t\tudelay(1);\n+\t} while (timeout);\n+\tif (!timeout)\n+\t\tpr_warn(\"octeon3_pki: reset timeout\\n\");\n+\n+\t/* Free all the allocated resources. 
We should only free the resources\n+\t * allocated by us (TODO).\n+\t */\n+\tfor (i = 0; i < PKI_NUM_STYLE; i++) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_styl\", 8);\n+\t\tsnprintf(buf, 16, \"e_%d.....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t\tres_mgr_free(tag, i);\n+\t}\n+\tfor (i = 0; i < PKI_NUM_QPG_ENTRY; i++) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_qpge\", 8);\n+\t\tsnprintf(buf, 16, \"t_%d.....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t\tres_mgr_free(tag, i);\n+\t}\n+\tfor (i = 0; i < get_num_clusters(); i++) {\n+\t\tfor (j = 0; j < MAX_BANKS; j++) {\n+\t\t\tstrncpy((char *)&tag.lo, \"cvm_pcam\", 8);\n+\t\t\tsnprintf(buf, 16, \"_%d%d%d....\", node, i, j);\n+\t\t\tmemcpy(&tag.hi, buf, 8);\n+\t\t\tfor (k = 0; k < MAX_BANK_ENTRIES; k++)\n+\t\t\t\tres_mgr_free(tag, k);\n+\t\t}\n+\t}\n+\n+\t/* Restore the registers back to their reset state. We should only reset\n+\t * the registers used by us (TODO).\n+\t */\n+\tfor (i = 0; i < get_num_clusters(); i++) {\n+\t\tfor (j = 0; j < MAX_PKNDS; j++) {\n+\t\t\toct_csr_write(0, PKI_CL_PKIND_CFG(node, i, j));\n+\t\t\toct_csr_write(0, PKI_CL_PKIND_STYLE(node, i, j));\n+\t\t\toct_csr_write(0, PKI_CL_PKIND_SKIP(node, i, j));\n+\t\t\toct_csr_write(0, PKI_CL_PKIND_L2_CUSTOM(node, i, j));\n+\t\t\toct_csr_write(0, PKI_CL_PKIND_LG_CUSTOM(node, i, j));\n+\t\t}\n+\n+\t\tfor (j = 0; j < PKI_NUM_FINAL_STYLE; j++) {\n+\t\t\toct_csr_write(0, PKI_CL_STYLE_CFG(node, i, j));\n+\t\t\toct_csr_write(0, PKI_CL_STYLE_CFG2(node, i, j));\n+\t\t\toct_csr_write(0, PKI_CLX_STYLEX_ALG(node, i, j));\n+\t\t}\n+\t}\n+\tfor (i = 0; i < PKI_NUM_FINAL_STYLE; i++)\n+\t\toct_csr_write((0x5 << 22) | 0x20, PKI_STYLE_BUF(node, i));\n+}\n+EXPORT_SYMBOL(octeon3_pki_shutdown);\n+\n+MODULE_LICENSE(\"GPL\");\n+MODULE_FIRMWARE(PKI_CLUSTER_FIRMWARE);\n+MODULE_AUTHOR(\"Carlos Munoz <cmunoz@cavium.com>\");\n+MODULE_DESCRIPTION(\"Cavium, Inc. 
PKI management.\");\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3-pko.c b/drivers/net/ethernet/cavium/octeon/octeon3-pko.c\nnew file mode 100644\nindex 000000000000..74dc55356e87\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3-pko.c\n@@ -0,0 +1,1719 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General Public\n+ * License. See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#include <linux/module.h>\n+\n+#include <asm/octeon/octeon.h>\n+\n+#include \"octeon3.h\"\n+\n+#define MAX_OUTPUT_MAC\t\t\t28\n+#define MAX_FIFO_GRP\t\t\t8\n+\n+#define FIFO_SIZE\t\t\t2560\n+\n+/* Registers are accessed via xkphys */\n+#define PKO_BASE\t\t\t0x1540000000000ull\n+#define PKO_ADDR(node)\t\t\t(SET_XKPHYS + NODE_OFFSET(node) + \\\n+\t\t\t\t\t PKO_BASE)\n+\n+#define PKO_L1_SQ_SHAPE(n, q)\t\t(PKO_ADDR(n) + ((q) << 9) + 0x000010)\n+#define PKO_L1_SQ_LINK(n, q)\t\t(PKO_ADDR(n) + ((q) << 9) + 0x000038)\n+#define PKO_DQ_WM_CTL(n, q)\t\t(PKO_ADDR(n) + ((q) << 9) + 0x000040)\n+#define PKO_L1_SQ_TOPOLOGY(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x080000)\n+#define PKO_L2_SQ_SCHEDULE(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x080008)\n+#define PKO_L3_L2_SQ_CHANNEL(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x080038)\n+#define PKO_CHANNEL_LEVEL(n)\t\t(PKO_ADDR(n)\t\t + 0x0800f0)\n+#define PKO_SHAPER_CFG(n)\t\t(PKO_ADDR(n)\t\t + 0x0800f8)\n+#define PKO_L2_SQ_TOPOLOGY(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x100000)\n+#define PKO_L3_SQ_SCHEDULE(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x100008)\n+#define PKO_L3_SQ_TOPOLOGY(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x180000)\n+#define PKO_L4_SQ_SCHEDULE(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x180008)\n+#define PKO_L4_SQ_TOPOLOGY(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x200000)\n+#define PKO_L5_SQ_SCHEDULE(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x200008)\n+#define PKO_L5_SQ_TOPOLOGY(n, q)\t(PKO_ADDR(n) + ((q) << 9) + 0x280000)\n+#define PKO_DQ_SCHEDULE(n, 
q)\t\t(PKO_ADDR(n) + ((q) << 9) + 0x280008)\n+#define PKO_DQ_SW_XOFF(n, q)\t\t(PKO_ADDR(n) + ((q) << 9) + 0x2800e0)\n+#define PKO_DQ_TOPOLOGY(n, q)\t\t(PKO_ADDR(n) + ((q) << 9) + 0x300000)\n+#define PKO_PDM_CFG(n)\t\t\t(PKO_ADDR(n)\t\t + 0x800000)\n+#define PKO_PDM_DQ_MINPAD(n, q)\t\t(PKO_ADDR(n) + ((q) << 3) + 0x8f0000)\n+#define PKO_MAC_CFG(n, m)\t\t(PKO_ADDR(n) + ((m) << 3) + 0x900000)\n+#define PKO_PTF_STATUS(n, f)\t\t(PKO_ADDR(n) + ((f) << 3) + 0x900100)\n+#define PKO_PTGF_CFG(n, g)\t\t(PKO_ADDR(n) + ((g) << 3) + 0x900200)\n+#define PKO_PTF_IOBP_CFG(n)\t\t(PKO_ADDR(n)\t\t + 0x900300)\n+#define PKO_MCI0_MAX_CRED(n, m)\t\t(PKO_ADDR(n) + ((m) << 3) + 0xa00000)\n+#define PKO_MCI1_MAX_CRED(n, m)\t\t(PKO_ADDR(n) + ((m) << 3) + 0xa80000)\n+#define PKO_LUT(n, c)\t\t\t(PKO_ADDR(n) + ((c) << 3) + 0xb00000)\n+#define PKO_DPFI_STATUS(n)\t\t(PKO_ADDR(n)\t\t + 0xc00000)\n+#define PKO_DPFI_FLUSH(n)\t\t(PKO_ADDR(n)\t\t + 0xc00008)\n+#define PKO_DPFI_FPA_AURA(n)\t\t(PKO_ADDR(n)\t\t + 0xc00010)\n+#define PKO_DPFI_ENA(n)\t\t\t(PKO_ADDR(n)\t\t + 0xc00018)\n+#define PKO_STATUS(n)\t\t\t(PKO_ADDR(n)\t\t + 0xd00000)\n+#define PKO_ENABLE(n)\t\t\t(PKO_ADDR(n)\t\t + 0xd00008)\n+\n+/* These levels mimic the pko internal linked queue structure */\n+enum queue_level {\n+\tPQ\t= 1,\n+\tL2_SQ\t= 2,\n+\tL3_SQ\t= 3,\n+\tL4_SQ\t= 4,\n+\tL5_SQ\t= 5,\n+\tDQ\t= 6\n+};\n+\n+enum pko_dqop_e {\n+\tDQOP_SEND,\n+\tDQOP_OPEN,\n+\tDQOP_CLOSE,\n+\tDQOP_QUERY\n+};\n+\n+enum pko_dqstatus_e {\n+\tPASS = 0,\n+\tBADSTATE = 0x8,\n+\tNOFPABUF = 0x9,\n+\tNOPKOBUF = 0xa,\n+\tFAILRTNPTR = 0xb,\n+\tALREADY = 0xc,\n+\tNOTCREATED = 0xd,\n+\tNOTEMPTY = 0xe,\n+\tSENDPKTDROP = 0xf\n+};\n+\n+struct mac_info {\n+\tint\tfifo_cnt;\n+\tint\tprio;\n+\tint\tspeed;\n+\tint\tfifo;\n+\tint\tnum_lmacs;\n+};\n+\n+struct fifo_grp_info {\n+\tint\tspeed;\n+\tint\tsize;\n+};\n+\n+static const int lut_index_78xx[] = {\n+\t0x200,\n+\t0x240,\n+\t0x280,\n+\t0x2c0,\n+\t0x300,\n+\t0x340\n+};\n+\n+static const int lut_index_73xx[] = 
{\n+\t0x000,\n+\t0x040,\n+\t0x080\n+};\n+\n+static enum queue_level max_sq_level(void)\n+{\n+\t/* 73xx and 75xx only have 3 scheduler queue levels */\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn L3_SQ;\n+\n+\treturn L5_SQ;\n+}\n+\n+static int get_num_fifos(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 16;\n+\n+\treturn 28;\n+}\n+\n+static int get_num_fifo_groups(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 5;\n+\n+\treturn 8;\n+}\n+\n+static int get_num_output_macs(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 28;\n+\telse if (OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 10;\n+\telse if (OCTEON_IS_MODEL(OCTEON_CN73XX))\n+\t\treturn 14;\n+\n+\treturn 0;\n+}\n+\n+static int get_output_mac(int\t\t\tinterface,\n+\t\t\t int\t\t\tindex,\n+\t\t\t enum octeon3_mac_type\tmac_type)\n+{\n+\tint mac;\n+\n+\t/* Output macs are hardcoded in the hardware. 
See PKO Output MACs\n+\t * section in the HRM.\n+\t */\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {\n+\t\tif (mac_type == SRIO_MAC)\n+\t\t\tmac = 4 + 2 * interface + index;\n+\t\telse\n+\t\t\tmac = 2 + 4 * interface + index;\n+\t} else {\n+\t\tmac = 4 + 4 * interface + index;\n+\t}\n+\n+\treturn mac;\n+}\n+\n+static int get_num_port_queues(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))\n+\t\treturn 16;\n+\n+\treturn 32;\n+}\n+\n+static int allocate_queues(int\t\t\tnode,\n+\t\t\t enum queue_level\tlevel,\n+\t\t\t int\t\t\tnum_queues,\n+\t\t\t int\t\t\t*queues)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\tint\t\t\t\tmax_queues = 0;\n+\tint\t\t\t\trc;\n+\n+\tif (level == PQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkop\", 8);\n+\t\tsnprintf(buf, 16, \"oq_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\tmax_queues = 32;\n+\t\telse\n+\t\t\tmax_queues = 16;\n+\t} else if (level == L2_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"2q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\tmax_queues = 512;\n+\t\telse\n+\t\t\tmax_queues = 256;\n+\t} else if (level == L3_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"3q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\tmax_queues = 512;\n+\t\telse\n+\t\t\tmax_queues = 256;\n+\t} else if (level == L4_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"4q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\tmax_queues = 1024;\n+\t\telse\n+\t\t\tmax_queues = 0;\n+\t} else if (level == L5_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"5q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tif 
(OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\tmax_queues = 1024;\n+\t\telse\n+\t\t\tmax_queues = 0;\n+\t} else if (level == DQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkod\", 8);\n+\t\tsnprintf(buf, 16, \"eq_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\t\tmax_queues = 1024;\n+\t\telse\n+\t\t\tmax_queues = 256;\n+\t}\n+\n+\tres_mgr_create_resource(tag, max_queues);\n+\trc = res_mgr_alloc_range(tag, -1, num_queues, false, queues);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n+\treturn 0;\n+}\n+\n+static void free_queues(int\t\t\tnode,\n+\t\t\tenum queue_level\tlevel,\n+\t\t\tint\t\t\tnum_queues,\n+\t\t\tconst int\t\t*queues)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\n+\tif (level == PQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkop\", 8);\n+\t\tsnprintf(buf, 16, \"oq_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t} else if (level == L2_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"2q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t} else if (level == L3_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"3q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t} else if (level == L4_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"4q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t} else if (level == L5_SQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkol\", 8);\n+\t\tsnprintf(buf, 16, \"5q_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t} else if (level == DQ) {\n+\t\tstrncpy((char *)&tag.lo, \"cvm_pkod\", 8);\n+\t\tsnprintf(buf, 16, \"eq_%d....\", node);\n+\t\tmemcpy(&tag.hi, buf, 8);\n+\t}\n+\n+\tres_mgr_free_range(tag, queues, num_queues);\n+}\n+\n+static int port_queue_init(int\tnode,\n+\t\t\t int\tpq,\n+\t\t\t int\tmac)\n+{\n+\tu64\tdata;\n+\n+\tdata = mac << 16;\n+\toct_csr_write(data, PKO_L1_SQ_TOPOLOGY(node, pq));\n+\n+\tdata = mac << 13;\n+\toct_csr_write(data, PKO_L1_SQ_SHAPE(node, 
pq));\n+\n+\tdata = mac;\n+\tdata <<= 44;\n+\toct_csr_write(data, PKO_L1_SQ_LINK(node, pq));\n+\n+\treturn 0;\n+}\n+\n+static int scheduler_queue_l2_init(int\tnode,\n+\t\t\t\t int\tqueue,\n+\t\t\t\t int\tparent_q)\n+{\n+\tu64\tdata;\n+\n+\tdata = oct_csr_read(PKO_L1_SQ_TOPOLOGY(node, parent_q));\n+\tdata &= ~(GENMASK_ULL(40, 32) | GENMASK_ULL(4, 1));\n+\tdata |= (u64)queue << 32;\n+\tdata |= 0xf << 1;\n+\toct_csr_write(data, PKO_L1_SQ_TOPOLOGY(node, parent_q));\n+\n+\toct_csr_write(0, PKO_L2_SQ_SCHEDULE(node, queue));\n+\n+\tdata = parent_q << 16;\n+\toct_csr_write(data, PKO_L2_SQ_TOPOLOGY(node, queue));\n+\n+\treturn 0;\n+}\n+\n+static int scheduler_queue_l3_init(int\tnode,\n+\t\t\t\t int\tqueue,\n+\t\t\t\t int\tparent_q)\n+{\n+\tu64\tdata;\n+\n+\tdata = oct_csr_read(PKO_L2_SQ_TOPOLOGY(node, parent_q));\n+\tdata &= ~(GENMASK_ULL(40, 32) | GENMASK_ULL(4, 1));\n+\tdata |= (u64)queue << 32;\n+\tdata |= 0xf << 1;\n+\toct_csr_write(data, PKO_L2_SQ_TOPOLOGY(node, parent_q));\n+\n+\toct_csr_write(0, PKO_L3_SQ_SCHEDULE(node, queue));\n+\n+\tdata = parent_q << 16;\n+\toct_csr_write(data, PKO_L3_SQ_TOPOLOGY(node, queue));\n+\n+\treturn 0;\n+}\n+\n+static int scheduler_queue_l4_init(int\tnode,\n+\t\t\t\t int\tqueue,\n+\t\t\t\t int\tparent_q)\n+{\n+\tu64\tdata;\n+\n+\tdata = oct_csr_read(PKO_L3_SQ_TOPOLOGY(node, parent_q));\n+\tdata &= ~(GENMASK_ULL(41, 32) | GENMASK_ULL(4, 1));\n+\tdata |= (u64)queue << 32;\n+\tdata |= 0xf << 1;\n+\toct_csr_write(data, PKO_L3_SQ_TOPOLOGY(node, parent_q));\n+\n+\toct_csr_write(0, PKO_L4_SQ_SCHEDULE(node, queue));\n+\n+\tdata = parent_q << 16;\n+\toct_csr_write(data, PKO_L4_SQ_TOPOLOGY(node, queue));\n+\n+\treturn 0;\n+}\n+\n+static int scheduler_queue_l5_init(int\tnode,\n+\t\t\t\t int\tqueue,\n+\t\t\t\t int\tparent_q)\n+{\n+\tu64\tdata;\n+\n+\tdata = oct_csr_read(PKO_L4_SQ_TOPOLOGY(node, parent_q));\n+\tdata &= ~(GENMASK_ULL(41, 32) | GENMASK_ULL(4, 1));\n+\tdata |= (u64)queue << 32;\n+\tdata |= 0xf << 1;\n+\toct_csr_write(data, 
PKO_L4_SQ_TOPOLOGY(node, parent_q));\n+\n+\toct_csr_write(0, PKO_L5_SQ_SCHEDULE(node, queue));\n+\n+\tdata = parent_q << 16;\n+\toct_csr_write(data, PKO_L5_SQ_TOPOLOGY(node, queue));\n+\n+\treturn 0;\n+}\n+\n+static int descriptor_queue_init(int\t\tnode,\n+\t\t\t\t const int\t*queue,\n+\t\t\t\t int\t\tparent_q,\n+\t\t\t\t int\t\tnum_dq)\n+{\n+\tu64\tdata;\n+\tu64\taddr;\n+\tint\tprio;\n+\tint\trr_prio;\n+\tint\trr_quantum;\n+\tint\ti;\n+\n+\t/* Limit static priorities to the available prio field bits */\n+\tif (num_dq > 9) {\n+\t\tpr_err(\"octeon3-pko: Invalid number of dqs\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tprio = 0;\n+\n+\tif (num_dq == 1) {\n+\t\t/* Single dq */\n+\t\trr_prio = 0xf;\n+\t\trr_quantum = 0x10;\n+\t} else {\n+\t\t/* Multiple dqs */\n+\t\trr_prio = num_dq;\n+\t\trr_quantum = 0;\n+\t}\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\taddr = PKO_L5_SQ_TOPOLOGY(node, parent_q);\n+\telse\n+\t\taddr = PKO_L3_SQ_TOPOLOGY(node, parent_q);\n+\n+\tdata = oct_csr_read(addr);\n+\tdata &= ~(GENMASK_ULL(41, 32) | GENMASK_ULL(4, 1));\n+\tdata |= (u64)queue[0] << 32;\n+\tdata |= rr_prio << 1;\n+\toct_csr_write(data, addr);\n+\n+\tfor (i = 0; i < num_dq; i++) {\n+\t\tdata = (prio << 24) | rr_quantum;\n+\t\toct_csr_write(data, PKO_DQ_SCHEDULE(node, queue[i]));\n+\n+\t\tdata = parent_q << 16;\n+\t\toct_csr_write(data, PKO_DQ_TOPOLOGY(node, queue[i]));\n+\n+\t\tdata = BIT(49);\n+\t\toct_csr_write(data, PKO_DQ_WM_CTL(node, queue[i]));\n+\n+\t\tif (prio << rr_prio)\n+\t\t\tprio++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int map_channel(int\tnode,\n+\t\t int\tpq,\n+\t\t int\tqueue,\n+\t\t int\tipd_port)\n+{\n+\tu64\tdata;\n+\tint\tlut_index = 0;\n+\tint\ttable_index;\n+\n+\tdata = oct_csr_read(PKO_L3_L2_SQ_CHANNEL(node, queue));\n+\tdata &= ~GENMASK_ULL(43, 32);\n+\tdata |= (u64)ipd_port << 32;\n+\toct_csr_write(data, PKO_L3_L2_SQ_CHANNEL(node, queue));\n+\n+\t/* See PKO_LUT register description in the HRM for how to compose the\n+\t * lut_index.\n+\t */\n+\tif 
(OCTEON_IS_MODEL(OCTEON_CN78XX)) {\n+\t\ttable_index = ((ipd_port & 0xf00) - 0x800) >> 8;\n+\t\tlut_index = lut_index_78xx[table_index];\n+\t\tlut_index += ipd_port & 0xff;\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {\n+\t\ttable_index = ((ipd_port & 0xf00) - 0x800) >> 8;\n+\t\tlut_index = lut_index_73xx[table_index];\n+\t\tlut_index += ipd_port & 0xff;\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {\n+\t\tif ((ipd_port & 0xf00) != 0x800)\n+\t\t\treturn -1;\n+\t\tlut_index = ipd_port & 0xff;\n+\t}\n+\n+\tdata = BIT(15);\n+\tdata |= pq << 9;\n+\tdata |= queue;\n+\toct_csr_write(data, PKO_LUT(node, lut_index));\n+\n+\treturn 0;\n+}\n+\n+static int open_dq(int node, int dq)\n+{\n+\tu64\t\t\tdata;\n+\tu64\t\t\t*iobdma_addr;\n+\tu64\t\t\t*scratch_addr;\n+\tenum pko_dqstatus_e\tstatus;\n+\n+\t/* Build the dq open query. See PKO_QUERY_DMA_S in the HRM for the\n+\t * query format.\n+\t */\n+\tdata = (LMTDMA_SCR_OFFSET >> 3) << 56;\n+\tdata |= 1ull << 48;\n+\tdata |= 0x51ull << 40;\n+\tdata |= (u64)node << 36;\n+\tdata |= (u64)DQOP_OPEN << 32;\n+\tdata |= dq << 16;\n+\n+\tCVMX_SYNCWS;\n+\tpreempt_disable();\n+\n+\t/* Clear return location */\n+\tscratch_addr = (u64 *)(SCRATCH_BASE + LMTDMA_SCR_OFFSET);\n+\t*scratch_addr = ~0ull;\n+\n+\t/* Issue pko lmtdma command */\n+\tiobdma_addr = (u64 *)(IOBDMA_ORDERED_IO_ADDR);\n+\t*iobdma_addr = data;\n+\n+\t/* Wait for lmtdma command to complete and get response*/\n+\tCVMX_SYNCIOBDMA;\n+\tdata = *scratch_addr;\n+\n+\tpreempt_enable();\n+\n+\t/* See PKO_QUERY_RTN_S in the HRM for response format */\n+\tstatus = (data & GENMASK_ULL(63, 60)) >> 60;\n+\tif (status != PASS && status != ALREADY) {\n+\t\tpr_err(\"octeon3-pko: Failed to open dq\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static s64 query_dq(int node, int dq)\n+{\n+\tu64\t\t\tdata;\n+\tu64\t\t\t*iobdma_addr;\n+\tu64\t\t\t*scratch_addr;\n+\tenum pko_dqstatus_e\tstatus;\n+\ts64\t\t\tdepth;\n+\n+\t/* Build the dq open query. 
See PKO_QUERY_DMA_S in the HRM for the\n+\t * query format.\n+\t */\n+\tdata = (LMTDMA_SCR_OFFSET >> 3) << 56;\n+\tdata |= 1ull << 48;\n+\tdata |= 0x51ull << 40;\n+\tdata |= (u64)node << 36;\n+\tdata |= (u64)DQOP_QUERY << 32;\n+\tdata |= dq << 16;\n+\n+\tCVMX_SYNCWS;\n+\tpreempt_disable();\n+\n+\t/* Clear return location */\n+\tscratch_addr = (u64 *)(SCRATCH_BASE + LMTDMA_SCR_OFFSET);\n+\t*scratch_addr = ~0ull;\n+\n+\t/* Issue pko lmtdma command */\n+\tiobdma_addr = (u64 *)(IOBDMA_ORDERED_IO_ADDR);\n+\t*iobdma_addr = data;\n+\n+\t/* Wait for lmtdma command to complete and get response*/\n+\tCVMX_SYNCIOBDMA;\n+\tdata = *scratch_addr;\n+\n+\tpreempt_enable();\n+\n+\t/* See PKO_QUERY_RTN_S in the HRM for response format */\n+\tstatus = (data & GENMASK_ULL(63, 60)) >> 60;\n+\tif (status != PASS) {\n+\t\tpr_err(\"octeon3-pko: Failed to query dq=%d\\n\", dq);\n+\t\treturn -1;\n+\t}\n+\n+\tdepth = data & GENMASK_ULL(47, 0);\n+\n+\treturn depth;\n+}\n+\n+static u64 close_dq(int node, int dq)\n+{\n+\tu64\t\t\tdata;\n+\tu64\t\t\t*iobdma_addr;\n+\tu64\t\t\t*scratch_addr;\n+\tenum pko_dqstatus_e\tstatus;\n+\n+\t/* Build the dq open query. 
See PKO_QUERY_DMA_S in the HRM for the\n+\t * query format.\n+\t */\n+\tdata = (LMTDMA_SCR_OFFSET >> 3) << 56;\n+\tdata |= 1ull << 48;\n+\tdata |= 0x51ull << 40;\n+\tdata |= (u64)node << 36;\n+\tdata |= (u64)DQOP_CLOSE << 32;\n+\tdata |= dq << 16;\n+\n+\tCVMX_SYNCWS;\n+\tpreempt_disable();\n+\n+\t/* Clear return location */\n+\tscratch_addr = (u64 *)(SCRATCH_BASE + LMTDMA_SCR_OFFSET);\n+\t*scratch_addr = ~0ull;\n+\n+\t/* Issue pko lmtdma command */\n+\tiobdma_addr = (u64 *)(IOBDMA_ORDERED_IO_ADDR);\n+\t*iobdma_addr = data;\n+\n+\t/* Wait for lmtdma command to complete and get response*/\n+\tCVMX_SYNCIOBDMA;\n+\tdata = *scratch_addr;\n+\n+\tpreempt_enable();\n+\n+\t/* See PKO_QUERY_RTN_S in the HRM for response format */\n+\tstatus = (data & GENMASK_ULL(63, 60)) >> 60;\n+\tif (status != PASS) {\n+\t\tpr_err(\"octeon3-pko: Failed to close dq\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int get_78xx_fifos_required(int node, struct mac_info *macs)\n+{\n+\tint\t\tfifo_cnt = 0;\n+\tint\t\tbgx;\n+\tint\t\tindex;\n+\tint\t\tqlm;\n+\tint\t\tnum_lmacs;\n+\tenum port_mode\tmode;\n+\tint\t\ti;\n+\tint\t\tcnt;\n+\tint\t\tprio;\n+\tu64\t\tdata;\n+\n+\t/* The loopback mac gets 1 fifo by default */\n+\tmacs[0].fifo_cnt = 1;\n+\tmacs[0].speed = 1;\n+\tfifo_cnt += 1;\n+\n+\t/* The dpi mac gets 1 fifo by default */\n+\tmacs[1].fifo_cnt = 1;\n+\tmacs[1].speed = 50;\n+\tfifo_cnt += 1;\n+\n+\t/* The ilk macs get default number of fifos (module param) */\n+\tmacs[2].fifo_cnt = ilk0_lanes <= 4 ? ilk0_lanes : 4;\n+\tmacs[2].speed = 40;\n+\tfifo_cnt += macs[2].fifo_cnt;\n+\tmacs[3].fifo_cnt = ilk1_lanes <= 4 ? 
ilk1_lanes : 4;\n+\tmacs[3].speed = 40;\n+\tfifo_cnt += macs[3].fifo_cnt;\n+\n+\t/* Assign fifos to the active bgx macs */\n+\tfor (i = 4; i < get_num_output_macs(); i += 4) {\n+\t\tbgx = (i - 4) / 4;\n+\t\tqlm = bgx_port_get_qlm(node, bgx, 0);\n+\n+\t\tdata = oct_csr_read(GSER_CFG(node, qlm));\n+\t\tif (data & BIT(2)) {\n+\t\t\tdata = oct_csr_read(BGX_CMR_TX_LMACS(node, bgx));\n+\t\t\tnum_lmacs = data & 7;\n+\n+\t\t\tfor (index = 0; index < num_lmacs; index++) {\n+\t\t\t\tswitch (num_lmacs) {\n+\t\t\t\tcase 1:\n+\t\t\t\t\tmacs[i + index].num_lmacs = 4;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase 2:\n+\t\t\t\t\tmacs[i + index].num_lmacs = 2;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase 4:\n+\t\t\t\tdefault:\n+\t\t\t\t\tmacs[i + index].num_lmacs = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\tmode = bgx_port_get_mode(node, bgx, 0);\n+\t\t\t\tswitch (mode) {\n+\t\t\t\tcase PORT_MODE_SGMII:\n+\t\t\t\tcase PORT_MODE_RGMII:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 1;\n+\t\t\t\t\tmacs[i + index].prio = 1;\n+\t\t\t\t\tmacs[i + index].speed = 1;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tcase PORT_MODE_XAUI:\n+\t\t\t\tcase PORT_MODE_RXAUI:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 4;\n+\t\t\t\t\tmacs[i + index].prio = 2;\n+\t\t\t\t\tmacs[i + index].speed = 20;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tcase PORT_MODE_10G_KR:\n+\t\t\t\tcase PORT_MODE_XFI:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 4;\n+\t\t\t\t\tmacs[i + index].prio = 2;\n+\t\t\t\t\tmacs[i + index].speed = 10;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tcase PORT_MODE_40G_KR4:\n+\t\t\t\tcase PORT_MODE_XLAUI:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 4;\n+\t\t\t\t\tmacs[i + index].prio = 3;\n+\t\t\t\t\tmacs[i + index].speed = 40;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tdefault:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 0;\n+\t\t\t\t\tmacs[i + index].prio = 0;\n+\t\t\t\t\tmacs[i + index].speed = 0;\n+\t\t\t\t\tmacs[i + index].num_lmacs = 0;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\tfifo_cnt += macs[i + index].fifo_cnt;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* If more fifos than available 
were assigned, reduce the number of\n+\t * fifos until within limit. Start with the lowest priority macs with 4\n+\t * fifos.\n+\t */\n+\tprio = 1;\n+\tcnt = 4;\n+\twhile (fifo_cnt > get_num_fifos()) {\n+\t\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\t\tif (macs[i].prio == prio && macs[i].fifo_cnt == cnt) {\n+\t\t\t\tmacs[i].fifo_cnt >>= 1;\n+\t\t\t\tfifo_cnt -= macs[i].fifo_cnt;\n+\t\t\t}\n+\n+\t\t\tif (fifo_cnt <= get_num_fifos())\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (prio >= 3) {\n+\t\t\tprio = 1;\n+\t\t\tcnt >>= 1;\n+\t\t} else {\n+\t\t\tprio++;\n+\t\t}\n+\n+\t\tif (cnt == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\t/* Assign left over fifos to dpi */\n+\tif (get_num_fifos() - fifo_cnt > 0) {\n+\t\tif (get_num_fifos() - fifo_cnt >= 3) {\n+\t\t\tmacs[1].fifo_cnt += 3;\n+\t\t\tfifo_cnt -= 3;\n+\t\t} else {\n+\t\t\tmacs[1].fifo_cnt += 1;\n+\t\t\tfifo_cnt -= 1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int get_75xx_fifos_required(int node, struct mac_info *macs)\n+{\n+\tint\t\tfifo_cnt = 0;\n+\tint\t\tbgx;\n+\tint\t\tindex;\n+\tint\t\tqlm;\n+\tenum port_mode\tmode;\n+\tint\t\ti;\n+\tint\t\tcnt;\n+\tint\t\tprio;\n+\tu64\t\tdata;\n+\n+\t/* The loopback mac gets 1 fifo by default */\n+\tmacs[0].fifo_cnt = 1;\n+\tmacs[0].speed = 1;\n+\tfifo_cnt += 1;\n+\n+\t/* The dpi mac gets 1 fifo by default */\n+\tmacs[1].fifo_cnt = 1;\n+\tmacs[1].speed = 50;\n+\tfifo_cnt += 1;\n+\n+\t/* Assign fifos to the active bgx macs */\n+\tbgx = 0;\n+\tfor (i = 2; i < 6; i++) {\n+\t\tindex = i - 2;\n+\t\tqlm = bgx_port_get_qlm(node, bgx, index);\n+\t\tdata = oct_csr_read(GSER_CFG(node, qlm));\n+\t\tif (data & BIT(2)) {\n+\t\t\tmacs[i].num_lmacs = 1;\n+\n+\t\t\tmode = bgx_port_get_mode(node, bgx, index);\n+\t\t\tswitch (mode) {\n+\t\t\tcase PORT_MODE_SGMII:\n+\t\t\tcase PORT_MODE_RGMII:\n+\t\t\t\tmacs[i].fifo_cnt = 1;\n+\t\t\t\tmacs[i].prio = 1;\n+\t\t\t\tmacs[i].speed = 1;\n+\t\t\t\tbreak;\n+\n+\t\t\tcase PORT_MODE_10G_KR:\n+\t\t\tcase PORT_MODE_XFI:\n+\t\t\t\tmacs[i].fifo_cnt = 
4;\n+\t\t\t\tmacs[i].prio = 2;\n+\t\t\t\tmacs[i].speed = 10;\n+\t\t\t\tbreak;\n+\n+\t\t\tdefault:\n+\t\t\t\tmacs[i].fifo_cnt = 0;\n+\t\t\t\tmacs[i].prio = 0;\n+\t\t\t\tmacs[i].speed = 0;\n+\t\t\t\tmacs[i].num_lmacs = 0;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tfifo_cnt += macs[i].fifo_cnt;\n+\t\t}\n+\t}\n+\n+\t/* If more fifos than available were assigned, reduce the number of\n+\t * fifos until within limit. Start with the lowest priority macs with 4\n+\t * fifos.\n+\t */\n+\tprio = 1;\n+\tcnt = 4;\n+\twhile (fifo_cnt > get_num_fifos()) {\n+\t\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\t\tif (macs[i].prio == prio && macs[i].fifo_cnt == cnt) {\n+\t\t\t\tmacs[i].fifo_cnt >>= 1;\n+\t\t\t\tfifo_cnt -= macs[i].fifo_cnt;\n+\t\t\t}\n+\n+\t\t\tif (fifo_cnt <= get_num_fifos())\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (prio >= 3) {\n+\t\t\tprio = 1;\n+\t\t\tcnt >>= 1;\n+\t\t} else {\n+\t\t\tprio++;\n+\t\t}\n+\n+\t\tif (cnt == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\t/* Assign left over fifos to dpi */\n+\tif (get_num_fifos() - fifo_cnt > 0) {\n+\t\tif (get_num_fifos() - fifo_cnt >= 3) {\n+\t\t\tmacs[1].fifo_cnt += 3;\n+\t\t\tfifo_cnt -= 3;\n+\t\t} else {\n+\t\t\tmacs[1].fifo_cnt += 1;\n+\t\t\tfifo_cnt -= 1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int get_73xx_fifos_required(int node, struct mac_info *macs)\n+{\n+\tint\t\tfifo_cnt = 0;\n+\tint\t\tbgx;\n+\tint\t\tindex;\n+\tint\t\tqlm;\n+\tint\t\tnum_lmacs;\n+\tenum port_mode\tmode;\n+\tint\t\ti;\n+\tint\t\tcnt;\n+\tint\t\tprio;\n+\tu64\t\tdata;\n+\n+\t/* The loopback mac gets 1 fifo by default */\n+\tmacs[0].fifo_cnt = 1;\n+\tmacs[0].speed = 1;\n+\tfifo_cnt += 1;\n+\n+\t/* The dpi mac gets 1 fifo by default */\n+\tmacs[1].fifo_cnt = 1;\n+\tmacs[1].speed = 50;\n+\tfifo_cnt += 1;\n+\n+\t/* Assign fifos to the active bgx macs */\n+\tfor (i = 2; i < get_num_output_macs(); i += 4) {\n+\t\tbgx = (i - 2) / 4;\n+\t\tqlm = bgx_port_get_qlm(node, bgx, 0);\n+\t\tdata = oct_csr_read(GSER_CFG(node, qlm));\n+\n+\t\t/* Bgx2 can be connected 
to dlm 5, 6, or both */\n+\t\tif (bgx == 2) {\n+\t\t\tif (!(data & BIT(2))) {\n+\t\t\t\tqlm = bgx_port_get_qlm(node, bgx, 2);\n+\t\t\t\tdata = oct_csr_read(GSER_CFG(node, qlm));\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (data & BIT(2)) {\n+\t\t\tdata = oct_csr_read(BGX_CMR_TX_LMACS(node, bgx));\n+\t\t\tnum_lmacs = data & 7;\n+\n+\t\t\tfor (index = 0; index < num_lmacs; index++) {\n+\t\t\t\tswitch (num_lmacs) {\n+\t\t\t\tcase 1:\n+\t\t\t\t\tmacs[i + index].num_lmacs = 4;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase 2:\n+\t\t\t\t\tmacs[i + index].num_lmacs = 2;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase 4:\n+\t\t\t\tdefault:\n+\t\t\t\t\tmacs[i + index].num_lmacs = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\tmode = bgx_port_get_mode(node, bgx, index);\n+\t\t\t\tswitch (mode) {\n+\t\t\t\tcase PORT_MODE_SGMII:\n+\t\t\t\tcase PORT_MODE_RGMII:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 1;\n+\t\t\t\t\tmacs[i + index].prio = 1;\n+\t\t\t\t\tmacs[i + index].speed = 1;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tcase PORT_MODE_XAUI:\n+\t\t\t\tcase PORT_MODE_RXAUI:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 4;\n+\t\t\t\t\tmacs[i + index].prio = 2;\n+\t\t\t\t\tmacs[i + index].speed = 20;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tcase PORT_MODE_10G_KR:\n+\t\t\t\tcase PORT_MODE_XFI:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 4;\n+\t\t\t\t\tmacs[i + index].prio = 2;\n+\t\t\t\t\tmacs[i + index].speed = 10;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tcase PORT_MODE_40G_KR4:\n+\t\t\t\tcase PORT_MODE_XLAUI:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 4;\n+\t\t\t\t\tmacs[i + index].prio = 3;\n+\t\t\t\t\tmacs[i + index].speed = 40;\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tdefault:\n+\t\t\t\t\tmacs[i + index].fifo_cnt = 0;\n+\t\t\t\t\tmacs[i + index].prio = 0;\n+\t\t\t\t\tmacs[i + index].speed = 0;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\tfifo_cnt += macs[i + index].fifo_cnt;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* If more fifos than available were assigned, reduce the number of\n+\t * fifos until within limit. 
Start with the lowest priority macs with 4\n+\t * fifos.\n+\t */\n+\tprio = 1;\n+\tcnt = 4;\n+\twhile (fifo_cnt > get_num_fifos()) {\n+\t\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\t\tif (macs[i].prio == prio && macs[i].fifo_cnt == cnt) {\n+\t\t\t\tmacs[i].fifo_cnt >>= 1;\n+\t\t\t\tfifo_cnt -= macs[i].fifo_cnt;\n+\t\t\t}\n+\n+\t\t\tif (fifo_cnt <= get_num_fifos())\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (prio >= 3) {\n+\t\t\tprio = 1;\n+\t\t\tcnt >>= 1;\n+\t\t} else {\n+\t\t\tprio++;\n+\t\t}\n+\n+\t\tif (cnt == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\t/* Assign left over fifos to dpi */\n+\tif (get_num_fifos() - fifo_cnt > 0) {\n+\t\tif (get_num_fifos() - fifo_cnt >= 3) {\n+\t\t\tmacs[1].fifo_cnt += 3;\n+\t\t\tfifo_cnt -= 3;\n+\t\t} else {\n+\t\t\tmacs[1].fifo_cnt += 1;\n+\t\t\tfifo_cnt -= 1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int setup_macs(int node)\n+{\n+\tstruct mac_info\t\tmacs[MAX_OUTPUT_MAC];\n+\tstruct fifo_grp_info\tfifo_grp[MAX_FIFO_GRP];\n+\tint\t\t\tcnt;\n+\tint\t\t\tfifo;\n+\tint\t\t\tgrp;\n+\tint\t\t\ti;\n+\tu64\t\t\tdata;\n+\tint\t\t\tsize;\n+\n+\tmemset(macs, 0, sizeof(macs));\n+\tmemset(fifo_grp, 0, sizeof(fifo_grp));\n+\n+\t/* Get the number of fifos required by each mac */\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX)) {\n+\t\tget_78xx_fifos_required(node, macs);\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {\n+\t\tget_75xx_fifos_required(node, macs);\n+\t} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {\n+\t\tget_73xx_fifos_required(node, macs);\n+\t} else {\n+\t\tpr_err(\"octeon3-pko: Unsupported board type\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Assign fifos to each mac. 
Start with macs requiring 4 fifos */\n+\tfifo = 0;\n+\tfor (cnt = 4; cnt > 0; cnt >>= 1) {\n+\t\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\t\tif (macs[i].fifo_cnt != cnt)\n+\t\t\t\tcontinue;\n+\n+\t\t\tmacs[i].fifo = fifo;\n+\t\t\tgrp = fifo / 4;\n+\n+\t\t\tfifo_grp[grp].speed += macs[i].speed;\n+\n+\t\t\tif (cnt == 4) {\n+\t\t\t\t/* 10, 0, 0, 0 */\n+\t\t\t\tfifo_grp[grp].size = 4;\n+\t\t\t} else if (cnt == 2) {\n+\t\t\t\t/* 5, 0, 5, 0 */\n+\t\t\t\tfifo_grp[grp].size = 3;\n+\t\t\t} else if (cnt == 1) {\n+\t\t\t\tif ((fifo & 0x2) && fifo_grp[grp].size == 3) {\n+\t\t\t\t\t/* 5, 0, 2.5, 2.5 */\n+\t\t\t\t\tfifo_grp[grp].size = 1;\n+\t\t\t\t} else {\n+\t\t\t\t\t/* 2.5, 2.5, 2.5, 2.5 */\n+\t\t\t\t\tfifo_grp[grp].size = 0;\n+\t\t\t\t}\n+\t\t\t}\n+\n+\t\t\tfifo += cnt;\n+\t\t}\n+\t}\n+\n+\t/* Configure the fifo groups */\n+\tfor (i = 0; i < get_num_fifo_groups(); i++) {\n+\t\tdata = oct_csr_read(PKO_PTGF_CFG(node, i));\n+\t\tsize = data & GENMASK_ULL(2, 0);\n+\t\tif (size != fifo_grp[i].size)\n+\t\t\tdata |= BIT(6);\n+\t\tdata &= ~GENMASK_ULL(2, 0);\n+\t\tdata |= fifo_grp[i].size;\n+\n+\t\tdata &= ~GENMASK_ULL(5, 3);\n+\t\tif (fifo_grp[i].speed >= 40) {\n+\t\t\tif (fifo_grp[i].size >= 3) {\n+\t\t\t\t/* 50 Gbps */\n+\t\t\t\tdata |= 0x3 << 3;\n+\t\t\t} else {\n+\t\t\t\t/* 25 Gbps */\n+\t\t\t\tdata |= 0x2 << 3;\n+\t\t\t}\n+\t\t} else if (fifo_grp[i].speed >= 20) {\n+\t\t\t/* 25 Gbps */\n+\t\t\tdata |= 0x2 << 3;\n+\t\t} else if (fifo_grp[i].speed >= 10) {\n+\t\t\t/* 12.5 Gbps */\n+\t\t\tdata |= 0x1 << 3;\n+\t\t}\n+\t\toct_csr_write(data, PKO_PTGF_CFG(node, i));\n+\t\tdata &= ~BIT(6);\n+\t\toct_csr_write(data, PKO_PTGF_CFG(node, i));\n+\t}\n+\n+\t/* Configure the macs with their assigned fifo */\n+\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\tdata = oct_csr_read(PKO_MAC_CFG(node, i));\n+\t\tdata &= ~GENMASK_ULL(4, 0);\n+\t\tif (!macs[i].fifo_cnt)\n+\t\t\tdata |= 0x1f;\n+\t\telse\n+\t\t\tdata |= macs[i].fifo;\n+\t\toct_csr_write(data, PKO_MAC_CFG(node, 
i));\n+\t}\n+\n+\t/* Setup mci0/mci1/skid credits */\n+\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\tint\tfifo_credit;\n+\t\tint\tmac_credit;\n+\t\tint\tskid_credit;\n+\n+\t\tif (!macs[i].fifo_cnt)\n+\t\t\tcontinue;\n+\n+\t\tif (i == 0) {\n+\t\t\t/* Loopback */\n+\t\t\tmac_credit = 4 * 1024;\n+\t\t\tskid_credit = 0;\n+\t\t} else if (i == 1) {\n+\t\t\t/* Dpi */\n+\t\t\tmac_credit = 2 * 1024;\n+\t\t\tskid_credit = 0;\n+\t\t} else if (OCTEON_IS_MODEL(OCTEON_CN78XX) && ((i == 2 || i == 3))) {\n+\t\t\t/* ILK */\n+\t\t\tmac_credit = 4 * 1024;\n+\t\t\tskid_credit = 0;\n+\t\t} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX) && ((i >= 6 && i <= 9))) {\n+\t\t\t/* Srio */\n+\t\t\tmac_credit = 1024 / 2;\n+\t\t\tskid_credit = 0;\n+\t\t} else {\n+\t\t\t/* Bgx */\n+\t\t\tmac_credit = macs[i].num_lmacs * 8 * 1024;\n+\t\t\tskid_credit = macs[i].num_lmacs * 256;\n+\t\t}\n+\n+\t\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {\n+\t\t\tfifo_credit = macs[i].fifo_cnt * FIFO_SIZE;\n+\t\t\tdata = (fifo_credit + mac_credit) / 16;\n+\t\t\toct_csr_write(data, PKO_MCI0_MAX_CRED(node, i));\n+\t\t}\n+\n+\t\tdata = mac_credit / 16;\n+\t\toct_csr_write(data, PKO_MCI1_MAX_CRED(node, i));\n+\n+\t\tdata = oct_csr_read(PKO_MAC_CFG(node, i));\n+\t\tdata &= ~GENMASK_ULL(6, 5);\n+\t\tdata |= ((skid_credit / 256) >> 1) << 5;\n+\t\toct_csr_write(data, PKO_MAC_CFG(node, i));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hw_init_global(int node, int aura)\n+{\n+\tu64\tdata;\n+\tint\ttimeout;\n+\n+\tdata = oct_csr_read(PKO_ENABLE(node));\n+\tif (data & BIT(0)) {\n+\t\tpr_info(\"octeon3-pko: Pko already enabled on node %d\\n\", node);\n+\t\treturn 0;\n+\t}\n+\n+\t/* Enable color awareness */\n+\tdata = oct_csr_read(PKO_SHAPER_CFG(node));\n+\tdata |= BIT(1);\n+\toct_csr_write(data, PKO_SHAPER_CFG(node));\n+\n+\t/* Clear flush command */\n+\toct_csr_write(0, PKO_DPFI_FLUSH(node));\n+\n+\t/* Set the aura number */\n+\tdata = (node << 10) | aura;\n+\toct_csr_write(data, PKO_DPFI_FPA_AURA(node));\n+\n+\tdata = 
BIT(0);\n+\toct_csr_write(data, PKO_DPFI_ENA(node));\n+\n+\t/* Wait until all pointers have been returned */\n+\ttimeout = 100000;\n+\tdo {\n+\t\tdata = oct_csr_read(PKO_STATUS(node));\n+\t\tif (data & BIT(63))\n+\t\t\tbreak;\n+\t\tudelay(1);\n+\t\ttimeout--;\n+\t} while (timeout);\n+\tif (!timeout) {\n+\t\tpr_err(\"octeon3-pko: Pko dfpi failed on node %d\\n\", node);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Set max outstanding requests in IOBP for any FIFO.*/\n+\tdata = oct_csr_read(PKO_PTF_IOBP_CFG(node));\n+\tdata &= ~GENMASK_ULL(6, 0);\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\tdata |= 0x10;\n+\telse\n+\t\tdata |= 3;\n+\toct_csr_write(data, PKO_PTF_IOBP_CFG(node));\n+\n+\t/* Set minimum packet size per Ethernet standard */\n+\tdata = 0x3c << 3;\n+\toct_csr_write(data, PKO_PDM_CFG(node));\n+\n+\t/* Initialize macs and fifos */\n+\tsetup_macs(node);\n+\n+\t/* Enable pko */\n+\tdata = BIT(0);\n+\toct_csr_write(data, PKO_ENABLE(node));\n+\n+\t/* Verify pko is ready */\n+\tdata = oct_csr_read(PKO_STATUS(node));\n+\tif (!(data & BIT(63))) {\n+\t\tpr_err(\"octeon3_pko: pko is not ready\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hw_exit_global(int node)\n+{\n+\tu64\tdata;\n+\tint\ttimeout;\n+\tint\ti;\n+\n+\t/* Wait until there are no in-flight packets */\n+\tfor (i = 0; i < get_num_fifos(); i++) {\n+\t\tdata = oct_csr_read(PKO_PTF_STATUS(node, i));\n+\t\tif ((data & GENMASK_ULL(4, 0)) == 0x1f)\n+\t\t\tcontinue;\n+\n+\t\ttimeout = 10000;\n+\t\tdo {\n+\t\t\tif (!(data & GENMASK_ULL(11, 5)))\n+\t\t\t\tbreak;\n+\t\t\tudelay(1);\n+\t\t\ttimeout--;\n+\t\t\tdata = oct_csr_read(PKO_PTF_STATUS(node, i));\n+\t\t} while (timeout);\n+\t\tif (!timeout) {\n+\t\t\tpr_err(\"octeon3-pko: Timeout in-flight fifo\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* Disable pko */\n+\toct_csr_write(0, PKO_ENABLE(node));\n+\n+\t/* Reset all port queues to the virtual mac */\n+\tfor (i = 0; i < get_num_port_queues(); i++) {\n+\t\tdata = get_num_output_macs() << 
16;\n+\t\toct_csr_write(data, PKO_L1_SQ_TOPOLOGY(node, i));\n+\n+\t\tdata = get_num_output_macs() << 13;\n+\t\toct_csr_write(data, PKO_L1_SQ_SHAPE(node, i));\n+\n+\t\tdata = (u64)get_num_output_macs() << 48;\n+\t\toct_csr_write(data, PKO_L1_SQ_LINK(node, i));\n+\t}\n+\n+\t/* Reset all output macs */\n+\tfor (i = 0; i < get_num_output_macs(); i++) {\n+\t\tdata = 0x1f;\n+\t\toct_csr_write(data, PKO_MAC_CFG(node, i));\n+\t}\n+\n+\t/* Reset all fifo groups */\n+\tfor (i = 0; i < get_num_fifo_groups(); i++) {\n+\t\tdata = oct_csr_read(PKO_PTGF_CFG(node, i));\n+\t\t/* Simulator asserts if an unused group is reset */\n+\t\tif (data == 0)\n+\t\t\tcontinue;\n+\t\tdata = BIT(6);\n+\t\toct_csr_write(data, PKO_PTGF_CFG(node, i));\n+\t}\n+\n+\t/* Return cache pointers to fpa */\n+\tdata = BIT(0);\n+\toct_csr_write(data, PKO_DPFI_FLUSH(node));\n+\ttimeout = 10000;\n+\tdo {\n+\t\tdata = oct_csr_read(PKO_DPFI_STATUS(node));\n+\t\tif (data & BIT(0))\n+\t\t\tbreak;\n+\t\tudelay(1);\n+\t\ttimeout--;\n+\t} while (timeout);\n+\tif (!timeout) {\n+\t\tpr_err(\"octeon3-pko: Timeout flushing cache\\n\");\n+\t\treturn -1;\n+\t}\n+\toct_csr_write(0, PKO_DPFI_ENA(node));\n+\toct_csr_write(0, PKO_DPFI_FLUSH(node));\n+\n+\treturn 0;\n+}\n+\n+static int virtual_mac_config(int node)\n+{\n+\tint\t\t\tvmac;\n+\tint\t\t\tpq;\n+\tint\t\t\tdq[8];\n+\tint\t\t\tnum_dq;\n+\tint\t\t\tparent_q;\n+\tenum queue_level\tlevel;\n+\tint\t\t\tqueue;\n+\tint\t\t\ti;\n+\tint\t\t\trc;\n+\n+\t/* The virtual mac is after the last output mac. 
Note: for the 73xx it\n+\t * might be 2 after the last output mac (15).\n+\t */\n+\tvmac = get_num_output_macs();\n+\n+\t/* Allocate a port queue */\n+\trc = allocate_queues(node, PQ, 1, &pq);\n+\tif (rc < 0) {\n+\t\tpr_err(\"octeon3-pko: Failed to allocate port queue\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\t/* Connect the port queue to the output mac */\n+\tport_queue_init(node, pq, vmac);\n+\n+\tparent_q = pq;\n+\tfor (level = L2_SQ; level <= max_sq_level(); level++) {\n+\t\trc = allocate_queues(node, level, 1, &queue);\n+\t\tif (rc < 0) {\n+\t\t\tpr_err(\"octeon3-pko: Failed to allocate queue\\n\");\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\tswitch (level) {\n+\t\tcase L2_SQ:\n+\t\t\tscheduler_queue_l2_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tcase L3_SQ:\n+\t\t\tscheduler_queue_l3_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tcase L4_SQ:\n+\t\t\tscheduler_queue_l4_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tcase L5_SQ:\n+\t\t\tscheduler_queue_l5_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tparent_q = queue;\n+\t}\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))\n+\t\tnum_dq = 8;\n+\telse\n+\t\tnum_dq = 1;\n+\n+\trc = allocate_queues(node, DQ, num_dq, dq);\n+\tif (rc < 0) {\n+\t\tpr_err(\"octeon3-pko: Failed to allocate description queues\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\t/* By convention the dq must be zero */\n+\tif (dq[0] != 0) {\n+\t\tpr_err(\"octeon3-pko: Failed to reserve description queues\\n\");\n+\t\treturn -1;\n+\t}\n+\tdescriptor_queue_init(node, dq, parent_q, num_dq);\n+\n+\t/* Open the dqs */\n+\tfor (i = 0; i < num_dq; i++)\n+\t\topen_dq(node, dq[i]);\n+\n+\treturn 0;\n+}\n+\n+static int drain_dq(int node, int dq)\n+{\n+\tu64\tdata;\n+\tint\ttimeout;\n+\ts64\trc;\n+\n+\tdata = BIT(2) | BIT(1);\n+\toct_csr_write(data, PKO_DQ_SW_XOFF(node, dq));\n+\n+\tusleep_range(1000, 2000);\n+\n+\tdata = 0;\n+\toct_csr_write(data, PKO_DQ_SW_XOFF(node, dq));\n+\n+\t/* Wait for the dq to drain */\n+\ttimeout 
= 10000;\n+\tdo {\n+\t\trc = query_dq(node, dq);\n+\t\tif (!rc)\n+\t\t\tbreak;\n+\t\telse if (rc < 0)\n+\t\t\treturn rc;\n+\t\tudelay(1);\n+\t\ttimeout--;\n+\t} while (timeout);\n+\tif (!timeout) {\n+\t\tpr_err(\"octeon3-pko: Timeout waiting for dq to drain\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Close the queue anf free internal buffers */\n+\tclose_dq(node, dq);\n+\n+\treturn 0;\n+}\n+\n+int octeon3_pko_exit_global(int node)\n+{\n+\tint\tdq[8];\n+\tint\tnum_dq;\n+\tint\ti;\n+\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))\n+\t\tnum_dq = 8;\n+\telse\n+\t\tnum_dq = 1;\n+\n+\t/* Shutdown the virtual/null interface */\n+\tfor (i = 0; i < ARRAY_SIZE(dq); i++)\n+\t\tdq[i] = i;\n+\tocteon3_pko_interface_uninit(node, dq, num_dq);\n+\n+\t/* Shutdown pko */\n+\thw_exit_global(node);\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pko_exit_global);\n+\n+int octeon3_pko_init_global(int node, int aura)\n+{\n+\tint\trc;\n+\n+\trc = hw_init_global(node, aura);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\t/* Channel credit level at level 2 */\n+\toct_csr_write(0, PKO_CHANNEL_LEVEL(node));\n+\n+\t/* Configure the null mac */\n+\trc = virtual_mac_config(node);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pko_init_global);\n+\n+int octeon3_pko_set_mac_options(int\t\t\tnode,\n+\t\t\t\tint\t\t\tinterface,\n+\t\t\t\tint\t\t\tindex,\n+\t\t\t\tenum octeon3_mac_type\tmac_type,\n+\t\t\t\tbool\t\t\tfcs_en,\n+\t\t\t\tbool\t\t\tpad_en,\n+\t\t\t\tint\t\t\tfcs_sop_off)\n+{\n+\tint\tmac;\n+\tu64\tdata;\n+\tint\tfifo_num;\n+\n+\tmac = get_output_mac(interface, index, mac_type);\n+\n+\tdata = oct_csr_read(PKO_MAC_CFG(node, mac));\n+\tfifo_num = data & GENMASK_ULL(4, 0);\n+\tif (fifo_num == 0x1f) {\n+\t\tpr_err(\"octeon3_pko: mac not configured %d:%d:%d\\n\", node, interface, index);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\t/* Some silicon requires fifo_num=0x1f to change padding, fcs */\n+\tdata &= ~GENMASK_ULL(4, 0);\n+\tdata |= 0x1f;\n+\n+\tdata &= ~(BIT(16) | BIT(15) | 
GENMASK_ULL(14, 7));\n+\tif (pad_en)\n+\t\tdata |= BIT(16);\n+\tif (fcs_en)\n+\t\tdata |= BIT(15);\n+\tif (fcs_sop_off)\n+\t\tdata |= fcs_sop_off << 7;\n+\n+\toct_csr_write(data, PKO_MAC_CFG(node, mac));\n+\n+\tdata &= ~GENMASK_ULL(4, 0);\n+\tdata |= fifo_num;\n+\toct_csr_write(data, PKO_MAC_CFG(node, mac));\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pko_set_mac_options);\n+\n+int octeon3_pko_get_fifo_size(int\t\t\tnode,\n+\t\t\t int\t\t\tinterface,\n+\t\t\t int\t\t\tindex,\n+\t\t\t enum octeon3_mac_type\tmac_type)\n+{\n+\tint\tmac;\n+\tu64\tdata;\n+\tint\tfifo_grp;\n+\tint\tfifo_off;\n+\tint\tsize;\n+\n+\t/* Set fifo size to 2.4 KB */\n+\tsize = FIFO_SIZE;\n+\n+\tmac = get_output_mac(interface, index, mac_type);\n+\n+\tdata = oct_csr_read(PKO_MAC_CFG(node, mac));\n+\tif ((data & GENMASK_ULL(4, 0)) == 0x1f) {\n+\t\tpr_err(\"octeon3_pko: mac not configured %d:%d:%d\\n\", node, interface, index);\n+\t\treturn -ENODEV;\n+\t}\n+\tfifo_grp = (data & GENMASK_ULL(4, 0)) >> 2;\n+\tfifo_off = data & GENMASK_ULL(1, 0);\n+\n+\tdata = oct_csr_read(PKO_PTGF_CFG(node, fifo_grp));\n+\tdata &= GENMASK_ULL(2, 0);\n+\tswitch (data) {\n+\tcase 0:\n+\t\t/* 2.5l, 2.5k, 2.5k, 2.5k */\n+\t\tbreak;\n+\tcase 1:\n+\t\t/* 5.0k, 0.0k, 2.5k, 2.5k */\n+\t\tif (fifo_off == 0)\n+\t\t\tsize *= 2;\n+\t\tif (fifo_off == 1)\n+\t\t\tsize = 0;\n+\t\tbreak;\n+\tcase 2:\n+\t\t/* 2.5k, 2.5k, 5.0k, 0.0k */\n+\t\tif (fifo_off == 2)\n+\t\t\tsize *= 2;\n+\t\tif (fifo_off == 3)\n+\t\t\tsize = 0;\n+\t\tbreak;\n+\tcase 3:\n+\t\t/* 5k, 0, 5k, 0 */\n+\t\tif ((fifo_off & 1) != 0)\n+\t\t\tsize = 0;\n+\t\tsize *= 2;\n+\t\tbreak;\n+\tcase 4:\n+\t\t/* 10k, 0, 0, 0 */\n+\t\tif (fifo_off != 0)\n+\t\t\tsize = 0;\n+\t\tsize *= 4;\n+\t\tbreak;\n+\tdefault:\n+\t\tsize = -1;\n+\t}\n+\n+\treturn size;\n+}\n+EXPORT_SYMBOL(octeon3_pko_get_fifo_size);\n+\n+int octeon3_pko_activate_dq(int node, int dq, int cnt)\n+{\n+\tint\ti;\n+\tint\trc = 0;\n+\tu64\tdata;\n+\n+\tfor (i = 0; i < cnt; i++) {\n+\t\trc = open_dq(node, dq + 
i);\n+\t\tif (rc)\n+\t\t\tbreak;\n+\n+\t\tdata = oct_csr_read(PKO_PDM_DQ_MINPAD(node, dq + i));\n+\t\tdata &= ~BIT(0);\n+\t\toct_csr_write(data, PKO_PDM_DQ_MINPAD(node, dq + i));\n+\t}\n+\n+\treturn rc;\n+}\n+EXPORT_SYMBOL(octeon3_pko_activate_dq);\n+\n+int octeon3_pko_interface_init(int\t\t\tnode,\n+\t\t\t int\t\t\tinterface,\n+\t\t\t int\t\t\tindex,\n+\t\t\t enum octeon3_mac_type\tmac_type,\n+\t\t\t int\t\t\tipd_port)\n+{\n+\tint\t\t\tmac;\n+\tint\t\t\tpq;\n+\tint\t\t\tparent_q;\n+\tint\t\t\tqueue;\n+\tenum queue_level\tlevel;\n+\tint\t\t\trc;\n+\n+\tmac = get_output_mac(interface, index, mac_type);\n+\n+\t/* Allocate a port queue for this interface */\n+\trc = allocate_queues(node, PQ, 1, &pq);\n+\tif (rc < 0) {\n+\t\tpr_err(\"octeon3-pko: Failed to allocate port queue\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\t/* Connect the port queue to the output mac */\n+\tport_queue_init(node, pq, mac);\n+\n+\t/* Link scheduler queues to the port queue */\n+\tparent_q = pq;\n+\tfor (level = L2_SQ; level <= max_sq_level(); level++) {\n+\t\trc = allocate_queues(node, level, 1, &queue);\n+\t\tif (rc < 0) {\n+\t\t\tpr_err(\"octeon3-pko: Failed to allocate queue\\n\");\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\tswitch (level) {\n+\t\tcase L2_SQ:\n+\t\t\tscheduler_queue_l2_init(node, queue, parent_q);\n+\t\t\tmap_channel(node, pq, queue, ipd_port);\n+\t\t\tbreak;\n+\t\tcase L3_SQ:\n+\t\t\tscheduler_queue_l3_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tcase L4_SQ:\n+\t\t\tscheduler_queue_l4_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tcase L5_SQ:\n+\t\t\tscheduler_queue_l5_init(node, queue, parent_q);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tparent_q = queue;\n+\t}\n+\n+\t/* Link the descriptor queue */\n+\trc = allocate_queues(node, DQ, 1, &queue);\n+\tif (rc < 0) {\n+\t\tpr_err(\"octeon3-pko: Failed to allocate descriptor queue\\n\");\n+\t\treturn rc;\n+\t}\n+\tdescriptor_queue_init(node, &queue, parent_q, 1);\n+\n+\treturn 
queue;\n+}\n+EXPORT_SYMBOL(octeon3_pko_interface_init);\n+\n+int octeon3_pko_interface_uninit(int\t\tnode,\n+\t\t\t\t const int\t*dq,\n+\t\t\t\t int\t\tnum_dq)\n+{\n+\tenum queue_level\tlevel;\n+\tint\t\t\tqueue;\n+\tint\t\t\tparent_q;\n+\tu64\t\t\tdata;\n+\tu64\t\t\taddr;\n+\tint\t\t\ti;\n+\tint\t\t\trc;\n+\n+\t/* Drain all dqs */\n+\tfor (i = 0; i < num_dq; i++) {\n+\t\trc = drain_dq(node, dq[i]);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\t/* Free the dq */\n+\t\tdata = oct_csr_read(PKO_DQ_TOPOLOGY(node, dq[i]));\n+\t\tparent_q = (data & GENMASK_ULL(25, 16)) >> 16;\n+\t\tfree_queues(node, DQ, 1, &dq[i]);\n+\n+\t\t/* Free all the scheduler queues */\n+\t\tqueue = parent_q;\n+\t\tfor (level = max_sq_level(); (signed int)level >= PQ; level--) {\n+\t\t\tswitch (level) {\n+\t\t\tcase L5_SQ:\n+\t\t\t\taddr = PKO_L5_SQ_TOPOLOGY(node, queue);\n+\t\t\t\tdata = oct_csr_read(addr);\n+\t\t\t\tparent_q = (data & GENMASK_ULL(25, 16)) >> 16;\n+\t\t\t\tbreak;\n+\n+\t\t\tcase L4_SQ:\n+\t\t\t\taddr = PKO_L4_SQ_TOPOLOGY(node, queue);\n+\t\t\t\tdata = oct_csr_read(addr);\n+\t\t\t\tparent_q = (data & GENMASK_ULL(24, 16)) >> 16;\n+\t\t\t\tbreak;\n+\n+\t\t\tcase L3_SQ:\n+\t\t\t\taddr = PKO_L3_SQ_TOPOLOGY(node, queue);\n+\t\t\t\tdata = oct_csr_read(addr);\n+\t\t\t\tparent_q = (data & GENMASK_ULL(24, 16)) >> 16;\n+\t\t\t\tbreak;\n+\n+\t\t\tcase L2_SQ:\n+\t\t\t\taddr = PKO_L2_SQ_TOPOLOGY(node, queue);\n+\t\t\t\tdata = oct_csr_read(addr);\n+\t\t\t\tparent_q = (data & GENMASK_ULL(20, 16)) >> 16;\n+\t\t\t\tbreak;\n+\n+\t\t\tcase PQ:\n+\t\t\t\tbreak;\n+\n+\t\t\tdefault:\n+\t\t\t\tpr_err(\"octeon3-pko: Invalid level=%d\\n\",\n+\t\t\t\t level);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+\t\t\tfree_queues(node, level, 1, &queue);\n+\t\t\tqueue = parent_q;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+EXPORT_SYMBOL(octeon3_pko_interface_uninit);\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3-sso.c b/drivers/net/ethernet/cavium/octeon/octeon3-sso.c\nnew file mode 100644\nindex 
000000000000..62a5bb5a07b7\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3-sso.c\n@@ -0,0 +1,309 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General Public\n+ * License. See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#include <linux/module.h>\n+\n+#include <asm/octeon/octeon.h>\n+\n+#include \"octeon3.h\"\n+\n+/* Registers are accessed via xkphys */\n+#define SSO_BASE\t\t\t0x1670000000000ull\n+#define SSO_ADDR(node)\t\t\t(SET_XKPHYS + NODE_OFFSET(node) + \\\n+\t\t\t\t\t SSO_BASE)\n+\n+#define SSO_AW_STATUS(n)\t\t(SSO_ADDR(n)\t\t + 0x000010e0)\n+#define SSO_AW_CFG(n)\t\t\t(SSO_ADDR(n)\t\t + 0x000010f0)\n+#define SSO_ERR0(n)\t\t\t(SSO_ADDR(n)\t\t + 0x00001240)\n+#define SSO_TAQ_ADD(n)\t\t\t(SSO_ADDR(n)\t\t + 0x000020e0)\n+#define SSO_XAQ_AURA(n)\t\t\t(SSO_ADDR(n)\t\t + 0x00002100)\n+\n+#define AQ_OFFSET(g)\t\t\t((g) << 3)\n+#define AQ_ADDR(n, g)\t\t\t(SSO_ADDR(n) + AQ_OFFSET(g))\n+#define SSO_XAQ_HEAD_PTR(n, g)\t\t(AQ_ADDR(n, g)\t\t + 0x00080000)\n+#define SSO_XAQ_TAIL_PTR(n, g)\t\t(AQ_ADDR(n, g)\t\t + 0x00090000)\n+#define SSO_XAQ_HEAD_NEXT(n, g)\t\t(AQ_ADDR(n, g)\t\t + 0x000a0000)\n+#define SSO_XAQ_TAIL_NEXT(n, g)\t\t(AQ_ADDR(n, g)\t\t + 0x000b0000)\n+\n+#define GRP_OFFSET(grp)\t\t\t((grp) << 16)\n+#define GRP_ADDR(n, g)\t\t\t(SSO_ADDR(n) + GRP_OFFSET(g))\n+#define SSO_GRP_TAQ_THR(n, g)\t\t(GRP_ADDR(n, g)\t\t + 0x20000100)\n+#define SSO_GRP_PRI(n, g)\t\t(GRP_ADDR(n, g)\t\t + 0x20000200)\n+#define SSO_GRP_INT(n, g)\t\t(GRP_ADDR(n, g)\t\t + 0x20000400)\n+#define SSO_GRP_INT_THR(n, g)\t\t(GRP_ADDR(n, g)\t\t + 0x20000500)\n+#define SSO_GRP_AQ_CNT(n, g)\t\t(GRP_ADDR(n, g)\t\t + 0x20000700)\n+\n+static int get_num_sso_grps(void)\n+{\n+\tif (OCTEON_IS_MODEL(OCTEON_CN78XX))\n+\t\treturn 256;\n+\tif (OCTEON_IS_MODEL(OCTEON_CNF75XX) || OCTEON_IS_MODEL(OCTEON_CN73XX))\n+\t\treturn 64;\n+\treturn 0;\n+}\n+\n+void octeon3_sso_irq_set(int node, 
int grp, bool en)\n+{\n+\tif (en)\n+\t\toct_csr_write(1, SSO_GRP_INT_THR(node, grp));\n+\telse\n+\t\toct_csr_write(0, SSO_GRP_INT_THR(node, grp));\n+\n+\toct_csr_write(BIT(1), SSO_GRP_INT(node, grp));\n+}\n+EXPORT_SYMBOL(octeon3_sso_irq_set);\n+\n+/**\n+ * octeon3_sso_alloc_grp_range - Allocate a range of sso groups.\n+ * @node: Node where sso resides.\n+ * @req_grp: Group number to start allocating sequentially from. -1 for don't\n+ *\t care.\n+ * @req_cnt: Number of groups to allocate.\n+ * @use_last_avail: Set to request the last available groups.\n+ * @grp: Updated with allocated groups.\n+ *\n+ * Returns 0 if successful.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_sso_alloc_grp_range(int\tnode,\n+\t\t\t\tint\treq_grp,\n+\t\t\t\tint\treq_cnt,\n+\t\t\t\tbool\tuse_last_avail,\n+\t\t\t\tint\t*grp)\n+{\n+\tstruct global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\n+\t/* Allocate the request group range */\n+\tstrncpy((char *)&tag.lo, \"cvm_sso_\", 8);\n+\tsnprintf(buf, 16, \"0%d......\", node);\n+\tmemcpy(&tag.hi, buf, 8);\n+\n+\tres_mgr_create_resource(tag, get_num_sso_grps());\n+\treturn res_mgr_alloc_range(tag, req_grp, req_cnt, false, grp);\n+}\n+EXPORT_SYMBOL(octeon3_sso_alloc_grp_range);\n+\n+/**\n+ * octeon3_sso_alloc_grp - Allocate a sso group.\n+ * @node: Node where sso resides.\n+ * @req_grp: Group number to allocate, -1 for don't care.\n+ *\n+ * Returns allocated group.\n+ * Returns <0 for error codes.\n+ */\n+int octeon3_sso_alloc_grp(int node, int req_grp)\n+{\n+\tint\tgrp;\n+\tint\trc;\n+\n+\trc = octeon3_sso_alloc_grp_range(node, req_grp, 1, false, &grp);\n+\tif (!rc)\n+\t\trc = grp;\n+\n+\treturn rc;\n+}\n+EXPORT_SYMBOL(octeon3_sso_alloc_grp);\n+\n+/**\n+ * octeon3_sso_free_grp_range - Free a range of sso groups.\n+ * @node: Node where sso resides.\n+ * @grp: Array of groups to free.\n+ * @req_cnt: Number of groups to free.\n+ */\n+void octeon3_sso_free_grp_range(int\tnode,\n+\t\t\t\tint\t*grp,\n+\t\t\t\tint\treq_cnt)\n+{\n+\tstruct 
global_resource_tag\ttag;\n+\tchar\t\t\t\tbuf[16];\n+\n+\t/* Allocate the request group range */\n+\tstrncpy((char *)&tag.lo, \"cvm_sso_\", 8);\n+\tsnprintf(buf, 16, \"0%d......\", node);\n+\tmemcpy(&tag.hi, buf, 8);\n+\n+\tres_mgr_free_range(tag, grp, req_cnt);\n+}\n+EXPORT_SYMBOL(octeon3_sso_free_grp_range);\n+\n+/**\n+ * octeon3_sso_free_grp - Free a sso group.\n+ * @node: Node where sso resides.\n+ * @grp: Group to free.\n+ */\n+void octeon3_sso_free_grp(int\tnode,\n+\t\t\t int\tgrp)\n+{\n+\tocteon3_sso_free_grp_range(node, &grp, 1);\n+}\n+EXPORT_SYMBOL(octeon3_sso_free_grp);\n+\n+/**\n+ * octeon3_sso_pass1_limit - Near full TAQ can cause hang. When the TAQ\n+ *\t\t\t (Transitory Admission Queue) is near-full, it is\n+ *\t\t\t possible for SSO to hang.\n+ *\t\t\t Workaround: Ensure that the sum of\n+ *\t\t\t SSO_GRP(0..255)_TAQ_THR[MAX_THR] of all used\n+ *\t\t\t groups is <= 1264. This may reduce single-group\n+ *\t\t\t performance when many groups are used.\n+ *\n+ * @node: Node to update.\n+ * @grp: SSO group to update.\n+ */\n+void octeon3_sso_pass1_limit(int node, int grp)\n+{\n+\tu64\ttaq_thr;\n+\tu64\ttaq_add;\n+\tu64\tmax_thr;\n+\tu64\trsvd_thr;\n+\n+\t/* Ideally, we would like to divide the maximum number of TAQ buffers\n+\t * (1264) among the sso groups in use. 
However, since we don't know how\n+\t * many sso groups are used by code outside this driver we take the\n+\t * worst case approach and assume all 256 sso groups must be supported.\n+\t */\n+\tmax_thr = 1264 / get_num_sso_grps();\n+\tif (max_thr < 4)\n+\t\tmax_thr = 4;\n+\trsvd_thr = max_thr - 1;\n+\n+\t/* Changes to SSO_GRP_TAQ_THR[rsvd_thr] must also update\n+\t * SSO_TAQ_ADD[RSVD_FREE].\n+\t */\n+\ttaq_thr = oct_csr_read(SSO_GRP_TAQ_THR(node, grp));\n+\ttaq_add = (rsvd_thr - (taq_thr & GENMASK_ULL(10, 0))) << 16;\n+\n+\ttaq_thr &= ~(GENMASK_ULL(42, 32) | GENMASK_ULL(10, 0));\n+\ttaq_thr |= max_thr << 32;\n+\ttaq_thr |= rsvd_thr;\n+\n+\toct_csr_write(taq_thr, SSO_GRP_TAQ_THR(node, grp));\n+\toct_csr_write(taq_add, SSO_TAQ_ADD(node));\n+}\n+EXPORT_SYMBOL(octeon3_sso_pass1_limit);\n+\n+/**\n+ * octeon3_sso_shutdown - Shutdown the sso. It undoes what octeon3_sso_init()\n+ *\t\t\t did.\n+ * @node: Node where sso to disable is.\n+ * @aura: Aura used for the sso buffers.\n+ */\n+void octeon3_sso_shutdown(int node, int aura)\n+{\n+\tu64\tdata;\n+\tint\tmax_grps;\n+\tint\ttimeout;\n+\tint\ti;\n+\n+\t/* Disable sso */\n+\tdata = oct_csr_read(SSO_AW_CFG(node));\n+\tdata |= BIT(6) | BIT(4);\n+\tdata &= ~BIT(0);\n+\toct_csr_write(data, SSO_AW_CFG(node));\n+\n+\t/* Extract the fpa buffers */\n+\tmax_grps = get_num_sso_grps();\n+\tfor (i = 0; i < max_grps; i++) {\n+\t\tu64\thead;\n+\t\tu64\ttail;\n+\t\tvoid\t*ptr;\n+\n+\t\thead = oct_csr_read(SSO_XAQ_HEAD_PTR(node, i));\n+\t\ttail = oct_csr_read(SSO_XAQ_TAIL_PTR(node, i));\n+\t\tdata = oct_csr_read(SSO_GRP_AQ_CNT(node, i));\n+\n+\t\t/* Verify pointers */\n+\t\thead &= GENMASK_ULL(41, 7);\n+\t\ttail &= GENMASK_ULL(41, 7);\n+\t\tif (head != tail) {\n+\t\t\tpr_err(\"octeon3_sso: bad ptr\\n\");\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/* This sso group should have no pending entries */\n+\t\tif (data & GENMASK_ULL(32, 0))\n+\t\t\tpr_err(\"octeon3_sso: not empty\\n\");\n+\n+\t\tptr = phys_to_virt(head);\n+\t\tocteon_fpa3_free(node, aura, 
ptr);\n+\n+\t\t/* Clear pointers */\n+\t\toct_csr_write(0, SSO_XAQ_HEAD_PTR(node, i));\n+\t\toct_csr_write(0, SSO_XAQ_HEAD_NEXT(node, i));\n+\t\toct_csr_write(0, SSO_XAQ_TAIL_PTR(node, i));\n+\t\toct_csr_write(0, SSO_XAQ_TAIL_NEXT(node, i));\n+\t}\n+\n+\t/* Make sure all buffers drained */\n+\ttimeout = 10000;\n+\tdo {\n+\t\tdata = oct_csr_read(SSO_AW_STATUS(node));\n+\t\tif ((data & GENMASK_ULL(5, 0)) == 0)\n+\t\t\tbreak;\n+\t\ttimeout--;\n+\t\tudelay(1);\n+\t} while (timeout);\n+\tif (!timeout)\n+\t\tpr_err(\"octeon3_sso: timeout\\n\");\n+}\n+EXPORT_SYMBOL(octeon3_sso_shutdown);\n+\n+/**\n+ * octeon3_sso_init - Initialize the sso.\n+ * @node: Node where sso resides.\n+ * @aura: Aura used for the sso buffers.\n+ */\n+int octeon3_sso_init(int node, int aura)\n+{\n+\tu64\tdata;\n+\tint\tmax_grps;\n+\tint\ti;\n+\tint\trc = 0;\n+\n+\tdata = BIT(3) | BIT(2) | BIT(1);\n+\toct_csr_write(data, SSO_AW_CFG(node));\n+\n+\tdata = (node << 10) | aura;\n+\toct_csr_write(data, SSO_XAQ_AURA(node));\n+\n+\tmax_grps = get_num_sso_grps();\n+\tfor (i = 0; i < max_grps; i++) {\n+\t\tu64\tphys;\n+\t\tvoid\t*mem;\n+\n+\t\tmem = octeon_fpa3_alloc(node, aura);\n+\t\tif (!mem) {\n+\t\t\trc = -ENOMEM;\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\tphys = virt_to_phys(mem);\n+\t\toct_csr_write(phys, SSO_XAQ_HEAD_PTR(node, i));\n+\t\toct_csr_write(phys, SSO_XAQ_HEAD_NEXT(node, i));\n+\t\toct_csr_write(phys, SSO_XAQ_TAIL_PTR(node, i));\n+\t\toct_csr_write(phys, SSO_XAQ_TAIL_NEXT(node, i));\n+\n+\t\t/* SSO-18678 */\n+\t\tdata = 0x3f << 16;\n+\t\toct_csr_write(data, SSO_GRP_PRI(node, i));\n+\t}\n+\n+\tdata = BIT(0);\n+\toct_csr_write(data, SSO_ERR0(node));\n+\n+\tdata = BIT(3) | BIT(2) | BIT(1) | BIT(0);\n+\toct_csr_write(data, SSO_AW_CFG(node));\n+\n+ err:\n+\treturn rc;\n+}\n+EXPORT_SYMBOL(octeon3_sso_init);\n+\n+MODULE_LICENSE(\"GPL\");\n+MODULE_AUTHOR(\"Cavium, Inc. <support@cavium.com>\");\n+MODULE_DESCRIPTION(\"Cavium, Inc. 
SSO management.\");\ndiff --git a/drivers/net/ethernet/cavium/octeon/octeon3.h b/drivers/net/ethernet/cavium/octeon/octeon3.h\nnew file mode 100644\nindex 000000000000..b84a2515b0a0\n--- /dev/null\n+++ b/drivers/net/ethernet/cavium/octeon/octeon3.h\n@@ -0,0 +1,411 @@\n+/*\n+ * Copyright (c) 2017 Cavium, Inc.\n+ *\n+ * This file is subject to the terms and conditions of the GNU General Public\n+ * License. See the file \"COPYING\" in the main directory of this archive\n+ * for more details.\n+ */\n+#ifndef _OCTEON3_H_\n+#define _OCTEON3_H_\n+\n+#include <linux/netdevice.h>\n+#include <linux/platform_device.h>\n+\n+#define MAX_NODES\t\t\t2\n+#define NODE_MASK\t\t\t(MAX_NODES - 1)\n+#define MAX_BGX_PER_NODE\t\t6\n+#define MAX_LMAC_PER_BGX\t\t4\n+\n+#define IOBDMA_ORDERED_IO_ADDR\t\t0xffffffffffffa200ull\n+#define LMTDMA_ORDERED_IO_ADDR\t\t0xffffffffffffa400ull\n+\n+#define SCRATCH_BASE\t\t\t0xffffffffffff8000ull\n+#define PKO_LMTLINE\t\t\t2ull\n+#define LMTDMA_SCR_OFFSET\t\t(PKO_LMTLINE * CVMX_CACHE_LINE_SIZE)\n+\n+/* Pko sub-command three bit codes (SUBDC3) */\n+#define PKO_SENDSUBDC_GATHER\t\t0x1\n+\n+/* Pko sub-command four bit codes (SUBDC4) */\n+#define PKO_SENDSUBDC_TSO\t\t0x8\n+#define PKO_SENDSUBDC_FREE\t\t0x9\n+#define PKO_SENDSUBDC_WORK\t\t0xa\n+#define PKO_SENDSUBDC_MEM\t\t0xc\n+#define PKO_SENDSUBDC_EXT\t\t0xd\n+\n+#define BGX_RX_FIFO_SIZE\t\t(64 * 1024)\n+#define BGX_TX_FIFO_SIZE\t\t(32 * 1024)\n+\n+/* Registers are accessed via xkphys */\n+#define SET_XKPHYS\t\t\tBIT_ULL(63)\n+#define NODE_OFFSET(node)\t\t((node) * 0x1000000000ull)\n+\n+/* Bgx register definitions */\n+#define BGX_BASE\t\t\t0x11800e0000000ull\n+#define BGX_OFFSET(bgx)\t\t\t(BGX_BASE + ((bgx) << 24))\n+#define INDEX_OFFSET(index)\t\t((index) << 20)\n+#define INDEX_ADDR(n, b, i)\t\t(SET_XKPHYS + NODE_OFFSET(n) +\t \\\n+\t\t\t\t\t BGX_OFFSET(b) + INDEX_OFFSET(i))\n+#define CAM_OFFSET(mac)\t\t\t((mac) << 3)\n+#define CAM_ADDR(n, b, m)\t\t(INDEX_ADDR(n, b, 0) + CAM_OFFSET(m))\n+\n+#define 
BGX_CMR_CONFIG(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x00000)\n+#define BGX_CMR_GLOBAL_CONFIG(n, b)\t(INDEX_ADDR(n, b, 0)\t + 0x00008)\n+#define BGX_CMR_RX_ID_MAP(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x00028)\n+#define BGX_CMR_RX_BP_ON(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x00088)\n+#define BGX_CMR_RX_ADR_CTL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x000a0)\n+#define BGX_CMR_RX_FIFO_LEN(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x000c0)\n+#define BGX_CMR_RX_ADRX_CAM(n, b, m)\t(CAM_ADDR(n, b, m)\t + 0x00100)\n+#define BGX_CMR_CHAN_MSK_AND(n, b)\t(INDEX_ADDR(n, b, 0)\t + 0x00200)\n+#define BGX_CMR_CHAN_MSK_OR(n, b)\t(INDEX_ADDR(n, b, 0)\t + 0x00208)\n+#define BGX_CMR_TX_FIFO_LEN(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x00418)\n+#define BGX_CMR_TX_LMACS(n, b)\t\t(INDEX_ADDR(n, b, 0)\t + 0x01000)\n+\n+#define BGX_SPU_CONTROL1(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10000)\n+#define BGX_SPU_STATUS1(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10008)\n+#define BGX_SPU_STATUS2(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10020)\n+#define BGX_SPU_BX_STATUS(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10028)\n+#define BGX_SPU_BR_STATUS1(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10030)\n+#define BGX_SPU_BR_STATUS2(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10038)\n+#define BGX_SPU_BR_BIP_ERR_CNT(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10058)\n+#define BGX_SPU_BR_PMD_CONTROL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10068)\n+#define BGX_SPU_BR_PMD_LP_CUP(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10078)\n+#define BGX_SPU_BR_PMD_LD_CUP(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10088)\n+#define BGX_SPU_BR_PMD_LD_REP(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10090)\n+#define BGX_SPU_FEC_CONTROL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x100a0)\n+#define BGX_SPU_AN_CONTROL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x100c8)\n+#define BGX_SPU_AN_STATUS(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x100d0)\n+#define BGX_SPU_AN_ADV(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x100d8)\n+#define BGX_SPU_MISC_CONTROL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x10218)\n+#define BGX_SPU_INT(n, b, 
i)\t\t(INDEX_ADDR(n, b, i)\t + 0x10220)\n+#define BGX_SPU_DBG_CONTROL(n, b)\t(INDEX_ADDR(n, b, 0)\t + 0x10300)\n+\n+#define BGX_SMU_RX_INT(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x20000)\n+#define BGX_SMU_RX_FRM_CTL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x20008)\n+#define BGX_SMU_RX_JABBER(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x20018)\n+#define BGX_SMU_RX_CTL(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x20030)\n+#define BGX_SMU_TX_APPEND(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x20100)\n+#define BGX_SMU_TX_MIN_PKT(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x20118)\n+#define BGX_SMU_TX_INT(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x20140)\n+#define BGX_SMU_TX_CTL(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x20160)\n+#define BGX_SMU_TX_THRESH(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x20168)\n+#define BGX_SMU_CTRL(n, b, i)\t\t(INDEX_ADDR(n, b, i)\t + 0x20200)\n+\n+#define BGX_GMP_PCS_MR_CONTROL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x30000)\n+#define BGX_GMP_PCS_MR_STATUS(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x30008)\n+#define BGX_GMP_PCS_AN_ADV(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x30010)\n+#define BGX_GMP_PCS_LINK_TIMER(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x30040)\n+#define BGX_GMP_PCS_SGM_AN_ADV(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x30068)\n+#define BGX_GMP_PCS_MISC_CTL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x30078)\n+#define BGX_GMP_GMI_PRT_CFG(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38010)\n+#define BGX_GMP_GMI_RX_FRM_CTL(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38018)\n+#define BGX_GMP_GMI_RX_JABBER(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38038)\n+#define BGX_GMP_GMI_TX_THRESH(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38210)\n+#define BGX_GMP_GMI_TX_APPEND(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38218)\n+#define BGX_GMP_GMI_TX_SLOT(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38220)\n+#define BGX_GMP_GMI_TX_BURST(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38228)\n+#define BGX_GMP_GMI_TX_MIN_PKT(n, b, i)\t(INDEX_ADDR(n, b, i)\t + 0x38240)\n+#define BGX_GMP_GMI_TX_SGMII_CTL(n, b, i) (INDEX_ADDR(n, b, i)\t + 0x38300)\n+\n+/* XCV register 
definitions */\n+#define XCV_BASE\t\t\t0x11800db000000ull\n+#define SET_XCV_BASE(node)\t\t(SET_XKPHYS + NODE_OFFSET(node) + \\\n+\t\t\t\t\t XCV_BASE)\n+#define XCV_RESET(node)\t\t\t(SET_XCV_BASE(node)\t + 0x0000)\n+#define XCV_DLL_CTL(node)\t\t(SET_XCV_BASE(node)\t + 0x0010)\n+#define XCV_COMP_CTL(node)\t\t(SET_XCV_BASE(node)\t + 0x0020)\n+#define XCV_CTL(node)\t\t\t(SET_XCV_BASE(node)\t + 0x0030)\n+#define XCV_INT(node)\t\t\t(SET_XCV_BASE(node)\t + 0x0040)\n+#define XCV_INBND_STATUS(node)\t\t(SET_XCV_BASE(node)\t + 0x0080)\n+#define XCV_BATCH_CRD_RET(node)\t\t(SET_XCV_BASE(node)\t + 0x0100)\n+\n+/* Gser register definitions */\n+#define GSER_BASE\t\t\t0x1180090000000ull\n+#define GSER_OFFSET(gser)\t\t(GSER_BASE + ((gser) << 24))\n+#define GSER_LANE_OFFSET(lane)\t\t((lane) << 20)\n+#define GSER_LANE_ADDR(n, g, l)\t\t(SET_XKPHYS + NODE_OFFSET(n) +\t \\\n+\t\t\t\t\t GSER_OFFSET(g) + GSER_LANE_OFFSET(l))\n+#define GSER_PHY_CTL(n, g)\t\t(GSER_LANE_ADDR(n, g, 0) + 0x000000)\n+#define GSER_CFG(n, g)\t\t\t(GSER_LANE_ADDR(n, g, 0) + 0x000080)\n+#define GSER_LANE_MODE(n, g)\t\t(GSER_LANE_ADDR(n, g, 0) + 0x000118)\n+#define GSER_RX_EIE_DETSTS(n, g)\t(GSER_LANE_ADDR(n, g, 0) + 0x000150)\n+#define GSER_LANE_LBERT_CFG(n, g, l)\t(GSER_LANE_ADDR(n, g, l) + 0x4c0020)\n+#define GSER_LANE_PCS_CTLIFC_0(n, g, l)\t(GSER_LANE_ADDR(n, g, l) + 0x4c0060)\n+#define GSER_LANE_PCS_CTLIFC_2(n, g, l)\t(GSER_LANE_ADDR(n, g, l) + 0x4c0070)\n+\n+/* Odd gser registers */\n+#define GSER_LANE_OFFSET_1(lane)\t((lane) << 7)\n+#define GSER_LANE_ADDR_1(n, g, l)\t(SET_XKPHYS + NODE_OFFSET(n) +\t \\\n+\t\t\t\t\t GSER_OFFSET(g) + GSER_LANE_OFFSET_1(l))\n+\n+#define GSER_BR_RX_CTL(n, g, l)\t\t(GSER_LANE_ADDR_1(n, g, l) + 0x000400)\n+#define GSER_BR_RX_EER(n, g, l)\t\t(GSER_LANE_ADDR_1(n, g, l) + 0x000418)\n+\n+#define GSER_LANE_OFFSET_2(mode)\t((mode) << 5)\n+#define GSER_LANE_ADDR_2(n, g, m)\t(SET_XKPHYS + NODE_OFFSET(n) +\t \\\n+\t\t\t\t\t GSER_OFFSET(g) + GSER_LANE_OFFSET_2(m))\n+\n+#define 
GSER_LANE_P_MODE_1(n, g, m)\t(GSER_LANE_ADDR_2(n, g, m) + 0x4e0048)\n+\n+#define DPI_BASE\t\t\t0x1df0000000000ull\n+#define DPI_ADDR(n)\t\t\t(SET_XKPHYS + NODE_OFFSET(n) + DPI_BASE)\n+#define DPI_CTL(n)\t\t\t(DPI_ADDR(n) + 0x00040)\n+\n+enum octeon3_mac_type {\n+\tBGX_MAC,\n+\tSRIO_MAC\n+};\n+\n+enum octeon3_src_type {\n+\tQLM,\n+\tXCV\n+};\n+\n+struct mac_platform_data {\n+\tenum octeon3_mac_type\tmac_type;\n+\tint\t\t\tnuma_node;\n+\tint\t\t\tinterface;\n+\tint\t\t\tport;\n+\tenum octeon3_src_type\tsrc_type;\n+};\n+\n+struct bgx_port_netdev_priv {\n+\tstruct bgx_port_priv *bgx_priv;\n+};\n+\n+/* Remove this define to use these enums after the last cvmx code references are\n+ * gone.\n+ */\n+/* PKO_MEMDSZ_E */\n+enum pko_memdsz_e {\n+\tMEMDSZ_B64 = 0,\n+\tMEMDSZ_B32 = 1,\n+\tMEMDSZ_B16 = 2,\n+\tMEMDSZ_B8 = 3\n+};\n+\n+/* PKO_MEMALG_E */\n+enum pko_memalg_e {\n+\tMEMALG_SET = 0,\n+\tMEMALG_SETTSTMP = 1,\n+\tMEMALG_SETRSLT = 2,\n+\tMEMALG_ADD = 8,\n+\tMEMALG_SUB = 9,\n+\tMEMALG_ADDLEN = 0xA,\n+\tMEMALG_SUBLEN = 0xB,\n+\tMEMALG_ADDMBUF = 0xC,\n+\tMEMALG_SUBMBUF = 0xD\n+};\n+\n+/* PKO_QUERY_RTN_S[DQSTATUS] */\n+enum pko_query_dqstatus {\n+\tPKO_DQSTATUS_PASS = 0,\n+\tPKO_DQSTATUS_BADSTATE = 0x8,\n+\tPKO_DQSTATUS_NOFPABUF = 0x9,\n+\tPKO_DQSTATUS_NOPKOBUF = 0xA,\n+\tPKO_DQSTATUS_FAILRTNPTR = 0xB,\n+\tPKO_DQSTATUS_ALREADY = 0xC,\n+\tPKO_DQSTATUS_NOTCREATED = 0xD,\n+\tPKO_DQSTATUS_NOTEMPTY = 0xE,\n+\tPKO_DQSTATUS_SENDPKTDROP = 0xF\n+};\n+\n+union wqe_word0 {\n+\tu64 u64;\n+\tstruct {\n+\t\t__BITFIELD_FIELD(u64 rsvd_0:4,\n+\t\t__BITFIELD_FIELD(u64 aura:12,\n+\t\t__BITFIELD_FIELD(u64 rsvd_1:1,\n+\t\t__BITFIELD_FIELD(u64 apad:3,\n+\t\t__BITFIELD_FIELD(u64 channel:12,\n+\t\t__BITFIELD_FIELD(u64 bufs:8,\n+\t\t__BITFIELD_FIELD(u64 style:8,\n+\t\t__BITFIELD_FIELD(u64 rsvd_2:10,\n+\t\t__BITFIELD_FIELD(u64 pknd:6,\n+\t\t;)))))))))\n+\t};\n+};\n+\n+union wqe_word1 {\n+\tu64 u64;\n+\tstruct {\n+\t\t__BITFIELD_FIELD(u64 len:16,\n+\t\t__BITFIELD_FIELD(u64 
rsvd_0:2,\n+\t\t__BITFIELD_FIELD(u64 rsvd_1:2,\n+\t\t__BITFIELD_FIELD(u64 grp:10,\n+\t\t__BITFIELD_FIELD(u64 tag_type:2,\n+\t\t__BITFIELD_FIELD(u64 tag:32,\n+\t\t;))))))\n+\t};\n+};\n+\n+union wqe_word2 {\n+\tu64 u64;\n+\tstruct {\n+\t\t__BITFIELD_FIELD(u64 software:1,\n+\t\t__BITFIELD_FIELD(u64 lg_hdr_type:5,\n+\t\t__BITFIELD_FIELD(u64 lf_hdr_type:5,\n+\t\t__BITFIELD_FIELD(u64 le_hdr_type:5,\n+\t\t__BITFIELD_FIELD(u64 ld_hdr_type:5,\n+\t\t__BITFIELD_FIELD(u64 lc_hdr_type:5,\n+\t\t__BITFIELD_FIELD(u64 lb_hdr_type:5,\n+\t\t__BITFIELD_FIELD(u64 is_la_ether:1,\n+\t\t__BITFIELD_FIELD(u64 rsvd_0:8,\n+\t\t__BITFIELD_FIELD(u64 vlan_valid:1,\n+\t\t__BITFIELD_FIELD(u64 vlan_stacked:1,\n+\t\t__BITFIELD_FIELD(u64 stat_inc:1,\n+\t\t__BITFIELD_FIELD(u64 pcam_flag4:1,\n+\t\t__BITFIELD_FIELD(u64 pcam_flag3:1,\n+\t\t__BITFIELD_FIELD(u64 pcam_flag2:1,\n+\t\t__BITFIELD_FIELD(u64 pcam_flag1:1,\n+\t\t__BITFIELD_FIELD(u64 is_frag:1,\n+\t\t__BITFIELD_FIELD(u64 is_l3_bcast:1,\n+\t\t__BITFIELD_FIELD(u64 is_l3_mcast:1,\n+\t\t__BITFIELD_FIELD(u64 is_l2_bcast:1,\n+\t\t__BITFIELD_FIELD(u64 is_l2_mcast:1,\n+\t\t__BITFIELD_FIELD(u64 is_raw:1,\n+\t\t__BITFIELD_FIELD(u64 err_level:3,\n+\t\t__BITFIELD_FIELD(u64 err_code:8,\n+\t\t;))))))))))))))))))))))))\n+\t};\n+};\n+\n+union buf_ptr {\n+\tu64 u64;\n+\tstruct {\n+\t\t__BITFIELD_FIELD(u64 size:16,\n+\t\t__BITFIELD_FIELD(u64 packet_outside_wqe:1,\n+\t\t__BITFIELD_FIELD(u64 rsvd0:5,\n+\t\t__BITFIELD_FIELD(u64 addr:42,\n+\t\t;))))\n+\t};\n+};\n+\n+union wqe_word4 {\n+\tu64 u64;\n+\tstruct {\n+\t\t__BITFIELD_FIELD(u64 ptr_vlan:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_g:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_f:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_e:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_d:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_c:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_b:8,\n+\t\t__BITFIELD_FIELD(u64 ptr_layer_a:8,\n+\t\t;))))))))\n+\t};\n+};\n+\n+struct wqe {\n+\tunion wqe_word0\tword0;\n+\tunion wqe_word1\tword1;\n+\tunion 
wqe_word2\tword2;\n+\tunion buf_ptr\tpacket_ptr;\n+\tunion wqe_word4\tword4;\n+\tu64\t\twqe_data[11];\n+};\n+\n+enum port_mode {\n+\tPORT_MODE_DISABLED,\n+\tPORT_MODE_SGMII,\n+\tPORT_MODE_RGMII,\n+\tPORT_MODE_XAUI,\n+\tPORT_MODE_RXAUI,\n+\tPORT_MODE_XLAUI,\n+\tPORT_MODE_XFI,\n+\tPORT_MODE_10G_KR,\n+\tPORT_MODE_40G_KR4\n+};\n+\n+enum lane_mode {\n+\tR_25G_REFCLK100,\n+\tR_5G_REFCLK100,\n+\tR_8G_REFCLK100,\n+\tR_125G_REFCLK15625_KX,\n+\tR_3125G_REFCLK15625_XAUI,\n+\tR_103125G_REFCLK15625_KR,\n+\tR_125G_REFCLK15625_SGMII,\n+\tR_5G_REFCLK15625_QSGMII,\n+\tR_625G_REFCLK15625_RXAUI,\n+\tR_25G_REFCLK125,\n+\tR_5G_REFCLK125,\n+\tR_8G_REFCLK125\n+};\n+\n+struct port_status {\n+\tint\tlink;\n+\tint\tduplex;\n+\tint\tspeed;\n+};\n+\n+static inline u64 oct_csr_read(u64 addr)\n+{\n+\treturn __raw_readq((void __iomem *)addr);\n+}\n+\n+static inline void oct_csr_write(u64 data, u64 addr)\n+{\n+\t__raw_writeq(data, (void __iomem *)addr);\n+}\n+\n+extern int ilk0_lanes;\n+extern int ilk1_lanes;\n+\n+void bgx_nexus_load(void);\n+\n+int bgx_port_allocate_pknd(int node);\n+int bgx_port_get_pknd(int node, int bgx, int index);\n+enum port_mode bgx_port_get_mode(int node, int bgx, int index);\n+int bgx_port_get_qlm(int node, int bgx, int index);\n+void bgx_port_set_netdev(struct device *dev, struct net_device *netdev);\n+int bgx_port_enable(struct net_device *netdev);\n+int bgx_port_disable(struct net_device *netdev);\n+const u8 *bgx_port_get_mac(struct net_device *netdev);\n+void bgx_port_set_rx_filtering(struct net_device *netdev);\n+int bgx_port_change_mtu(struct net_device *netdev, int new_mtu);\n+int bgx_port_ethtool_get_link_ksettings(struct net_device *netdev,\n+\t\t\t\t\tstruct ethtool_link_ksettings *cmd);\n+int bgx_port_ethtool_get_settings(struct net_device *netdev,\n+\t\t\t\t struct ethtool_cmd *cmd);\n+int bgx_port_ethtool_set_settings(struct net_device *netdev,\n+\t\t\t\t struct ethtool_cmd *cmd);\n+int bgx_port_ethtool_nway_reset(struct net_device *netdev);\n+int 
bgx_port_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);\n+\n+void bgx_port_mix_assert_reset(struct net_device *netdev, int mix, bool v);\n+\n+int octeon3_pki_vlan_init(int node);\n+int octeon3_pki_cluster_init(int node, struct platform_device *pdev);\n+int octeon3_pki_ltype_init(int node);\n+int octeon3_pki_enable(int node);\n+int octeon3_pki_port_init(int node, int aura, int grp, int skip, int mb_size,\n+\t\t\t int pknd, int num_rx_cxt);\n+int octeon3_pki_get_stats(int node, int pknd, u64 *packets, u64 *octets,\n+\t\t\t u64 *dropped);\n+int octeon3_pki_set_ptp_skip(int node, int pknd, int skip);\n+int octeon3_pki_port_shutdown(int node, int pknd);\n+void octeon3_pki_shutdown(int node);\n+\n+void octeon3_sso_pass1_limit(int node, int grp);\n+int octeon3_sso_init(int node, int aura);\n+void octeon3_sso_shutdown(int node, int aura);\n+int octeon3_sso_alloc_grp(int node, int grp);\n+int octeon3_sso_alloc_grp_range(int node, int req_grp, int req_cnt,\n+\t\t\t\tbool use_last_avail, int *grp);\n+void octeon3_sso_free_grp(int node, int grp);\n+void octeon3_sso_free_grp_range(int node, int *grp, int req_cnt);\n+void octeon3_sso_irq_set(int node, int grp, bool en);\n+\n+int octeon3_pko_interface_init(int node, int interface, int index,\n+\t\t\t enum octeon3_mac_type mac_type, int ipd_port);\n+int octeon3_pko_activate_dq(int node, int dq, int cnt);\n+int octeon3_pko_get_fifo_size(int node, int interface, int index,\n+\t\t\t enum octeon3_mac_type mac_type);\n+int octeon3_pko_set_mac_options(int node, int interface, int index,\n+\t\t\t\tenum octeon3_mac_type mac_type, bool fcs_en,\n+\t\t\t\tbool pad_en, int fcs_sop_off);\n+int octeon3_pko_init_global(int node, int aura);\n+int octeon3_pko_interface_uninit(int node, const int *dq, int num_dq);\n+int octeon3_pko_exit_global(int node);\n+\n+#endif /* _OCTEON3_H_ */\n", "prefixes": [ "6/7" ] }