get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/1.0/patches/2197810/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2197810,
    "url": "http://patchwork.ozlabs.org/api/1.0/patches/2197810/?format=api",
    "project": {
        "id": 21,
        "url": "http://patchwork.ozlabs.org/api/1.0/projects/21/?format=api",
        "name": "Linux Tegra Development",
        "link_name": "linux-tegra",
        "list_id": "linux-tegra.vger.kernel.org",
        "list_email": "linux-tegra@vger.kernel.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null
    },
    "msgid": "<20260218145809.1622856-8-bwicaksono@nvidia.com>",
    "date": "2026-02-18T14:58:08",
    "name": "[v2,7/8] perf: add NVIDIA Tegra410 C2C PMU",
    "commit_ref": null,
    "pull_url": null,
    "state": "handled-elsewhere",
    "archived": false,
    "hash": "45c8ad1e85551588a02c9e65ebee141fad9eba24",
    "submitter": {
        "id": 83903,
        "url": "http://patchwork.ozlabs.org/api/1.0/people/83903/?format=api",
        "name": "Besar Wicaksono",
        "email": "bwicaksono@nvidia.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/linux-tegra/patch/20260218145809.1622856-8-bwicaksono@nvidia.com/mbox/",
    "series": [
        {
            "id": 492565,
            "url": "http://patchwork.ozlabs.org/api/1.0/series/492565/?format=api",
            "date": "2026-02-18T14:58:01",
            "name": "perf: add NVIDIA Tegra410 Uncore PMU support",
            "version": 2,
            "mbox": "http://patchwork.ozlabs.org/series/492565/mbox/"
        }
    ],
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2197810/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "\n <linux-tegra+bounces-12059-incoming=patchwork.ozlabs.org@vger.kernel.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "linux-tegra@vger.kernel.org"
        ],
        "Delivered-To": "patchwork-incoming@legolas.ozlabs.org",
        "Authentication-Results": [
            "legolas.ozlabs.org;\n\tdkim=pass (2048-bit key;\n unprotected) header.d=Nvidia.com header.i=@Nvidia.com header.a=rsa-sha256\n header.s=selector2 header.b=UzBblBBF;\n\tdkim-atps=neutral",
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=vger.kernel.org\n (client-ip=172.234.253.10; helo=sea.lore.kernel.org;\n envelope-from=linux-tegra+bounces-12059-incoming=patchwork.ozlabs.org@vger.kernel.org;\n receiver=patchwork.ozlabs.org)",
            "smtp.subspace.kernel.org;\n\tdkim=pass (2048-bit key) header.d=Nvidia.com header.i=@Nvidia.com\n header.b=\"UzBblBBF\"",
            "smtp.subspace.kernel.org;\n arc=fail smtp.client-ip=52.101.48.18",
            "smtp.subspace.kernel.org;\n dmarc=pass (p=reject dis=none) header.from=nvidia.com",
            "smtp.subspace.kernel.org;\n spf=fail smtp.mailfrom=nvidia.com"
        ],
        "Received": [
            "from sea.lore.kernel.org (sea.lore.kernel.org [172.234.253.10])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4fGKTY4VvJz1xvq\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 19 Feb 2026 02:01:21 +1100 (AEDT)",
            "from smtp.subspace.kernel.org (conduit.subspace.kernel.org\n [100.90.174.1])\n\tby sea.lore.kernel.org (Postfix) with ESMTP id 7B6D4301E6DA\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 18 Feb 2026 14:59:29 +0000 (UTC)",
            "from localhost.localdomain (localhost.localdomain [127.0.0.1])\n\tby smtp.subspace.kernel.org (Postfix) with ESMTP id 629D6334C0A;\n\tWed, 18 Feb 2026 14:59:29 +0000 (UTC)",
            "from MW6PR02CU001.outbound.protection.outlook.com\n (mail-westus2azon11012018.outbound.protection.outlook.com [52.101.48.18])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby smtp.subspace.kernel.org (Postfix) with ESMTPS id C2FC433F364;\n\tWed, 18 Feb 2026 14:59:26 +0000 (UTC)",
            "from MN2PR14CA0023.namprd14.prod.outlook.com (2603:10b6:208:23e::28)\n by PH7PR12MB7257.namprd12.prod.outlook.com (2603:10b6:510:205::21) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.9632.13; Wed, 18 Feb\n 2026 14:59:15 +0000",
            "from BL02EPF00021F6A.namprd02.prod.outlook.com\n (2603:10b6:208:23e:cafe::a7) by MN2PR14CA0023.outlook.office365.com\n (2603:10b6:208:23e::28) with Microsoft SMTP Server (version=TLS1_3,\n cipher=TLS_AES_256_GCM_SHA384) id 15.20.9632.15 via Frontend Transport; Wed,\n 18 Feb 2026 14:59:15 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BL02EPF00021F6A.mail.protection.outlook.com (10.167.249.6) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.9632.12 via Frontend Transport; Wed, 18 Feb 2026 14:59:14 +0000",
            "from rnnvmail202.nvidia.com (10.129.68.7) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.2562.20; Wed, 18 Feb\n 2026 06:58:56 -0800",
            "from rnnvmail201.nvidia.com (10.129.68.8) by rnnvmail202.nvidia.com\n (10.129.68.7) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.2562.20; Wed, 18 Feb\n 2026 06:58:55 -0800",
            "from build-bwicaksono-noble-20251018.internal (10.127.8.11) by\n mail.nvidia.com (10.129.68.8) with Microsoft SMTP Server id 15.2.2562.20 via\n Frontend Transport; Wed, 18 Feb 2026 06:58:54 -0800"
        ],
        "ARC-Seal": [
            "i=2; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;\n\tt=1771426769; cv=fail;\n b=uOvk2F2kHVpKhf/NwPB242/1+rSoeEI6/dw/MjtGg9RTo2t8aPQx7PhsxbTba/jL5P0Qt4Wd1L0xwCVEthTDCnbJGALUrAduoDcqbZRaGdpH2cdnOKxiu4M5cUUg9NFVZYDd3k2owV+3lho3+qf6KyCKOcwb7FxvjkX0zxQp/fY=",
            "i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none;\n b=NQWZ7/VxgmqZilKPFjG6ff8IdcEx6S+28afG7UniN66b3cgWzLnH9bMq8/OHFPzNIgGPvc0aiiQIPm70pRObeIzQPNUNo9L8bWRlAttD7wacQqf6mLsJ++Re7CTFs0jJ8eQTvOs0kHiyNPi1Y7cLpsSYBZOV8QlLluk43WPnV0RCjhXQoNotkdh75MJ9zp6zrbInM8fVpWSUAHQQ/w5fhps2FThF4HNITSc9HsCCkefxxrooOMOb0EfiPT4GZnVb1lTE4nZZteU7DpEsvZcBHvU47G81+W9bsBTqF9i47xcatLGbOYpZIUIaG/SyAYjbOKbLrFqk0QBh1UHycz/Ldw=="
        ],
        "ARC-Message-Signature": [
            "i=2; a=rsa-sha256; d=subspace.kernel.org;\n\ts=arc-20240116; t=1771426769; c=relaxed/simple;\n\tbh=NvhRoVQJXBnHlbwqIXep+ixCk5qdTzjLqjwR5wHGmg8=;\n\th=From:To:CC:Subject:Date:Message-ID:In-Reply-To:References:\n\t MIME-Version:Content-Type;\n b=I6aQJytZWS2OtNp+UUtrC9YAkrjauTqpolKFPaKCey7tTi2WRW2VB8UaAahAp1m0UaSwi1UABFYojMH7tZPynqkYuNrXNnOtoX/2Zj6DW3JSaI9sBOeI6oHE6Baesi9cgUU80AQBYaifnqyxpxmzcv44UIS1xYSbZmMoBuWrz8Q=",
            "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector10001;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=kpkQC+XS5tSp0ivCAqDV/7D4JswoTPnJc6SsnbVCeGM=;\n b=dflMidb3sFe4OP+cZeEpXxqM1iJKVQ4HYbxwpYttEyI3zeVLcGhos1Rwb3PrTNcGIqwg+Ycrg4yTgbbV8D1qe2wsl3lv6q444r0jTvyz1s12fqB0DibgywREfvLL0N3UbZAjBjPgI5RiLgQCOd9fLeHqfLIWhuCe1kbpbSMChKiUkKM02O27I/KoRvaMgeqtupJIsmFe6M90sv4T8+UQcM8IxePaouCvXbSfw5tS4yDKdEaLcRzBni+nW48/GB0viKlJn3r0fPPCOAJpkZrmZ6iLBNB2gENlpYbzJ0NdMmC2N+Tf9m8zjy8dDDAWJvJmWat2xqbGQru0CFuWDAo+pQ=="
        ],
        "ARC-Authentication-Results": [
            "i=2; smtp.subspace.kernel.org;\n dmarc=pass (p=reject dis=none) header.from=nvidia.com;\n spf=fail smtp.mailfrom=nvidia.com;\n dkim=pass (2048-bit key) header.d=Nvidia.com header.i=@Nvidia.com\n header.b=UzBblBBF; arc=fail smtp.client-ip=52.101.48.18",
            "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=kernel.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none (0)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=kpkQC+XS5tSp0ivCAqDV/7D4JswoTPnJc6SsnbVCeGM=;\n b=UzBblBBFbG65AG8MCDE53zdxdS0eBtV0z3WkhN/h8sdrLHBbLkZoTLfHnZ/4Mq4lt1YpfO/qS0VsBuMZiB+GjiGr1vDi6fbtUn8O7m3kbufyToAGINiEX5r5uA2ef9cJ7KjySeyma645C9Md1rqK46Yz+z6Auh0m8wC+ULPsD9jk5Xf/3zbSOcOwcD0Li1oaQ1X/3e5/v6qin42j5jhDWgdMdUae0aFRjnngVfTmeWRvt6DJZJqsGaAFW0Mzbw0CsHtNddt/N4g9gdCsmPCLVtjnKreXuMwYOhL4ZYWQZ4Iai5hJ8m2C9ZKUMKTfwqN1S1fRj+Qocd/5hy2hSJKG1w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Besar Wicaksono <bwicaksono@nvidia.com>",
        "To": "<will@kernel.org>, <suzuki.poulose@arm.com>, <robin.murphy@arm.com>,\n\t<ilkka@os.amperecomputing.com>",
        "CC": "<linux-arm-kernel@lists.infradead.org>, <linux-kernel@vger.kernel.org>,\n\t<linux-tegra@vger.kernel.org>, <mark.rutland@arm.com>, <treding@nvidia.com>,\n\t<jonathanh@nvidia.com>, <vsethi@nvidia.com>, <rwiley@nvidia.com>,\n\t<sdonthineni@nvidia.com>, <skelley@nvidia.com>, <ywan@nvidia.com>,\n\t<mochs@nvidia.com>, <nirmoyd@nvidia.com>, Besar Wicaksono\n\t<bwicaksono@nvidia.com>",
        "Subject": "[PATCH v2 7/8] perf: add NVIDIA Tegra410 C2C PMU",
        "Date": "Wed, 18 Feb 2026 14:58:08 +0000",
        "Message-ID": "<20260218145809.1622856-8-bwicaksono@nvidia.com>",
        "X-Mailer": "git-send-email 2.43.0",
        "In-Reply-To": "<20260218145809.1622856-1-bwicaksono@nvidia.com>",
        "References": "<20260218145809.1622856-1-bwicaksono@nvidia.com>",
        "Precedence": "bulk",
        "X-Mailing-List": "linux-tegra@vger.kernel.org",
        "List-Id": "<linux-tegra.vger.kernel.org>",
        "List-Subscribe": "<mailto:linux-tegra+subscribe@vger.kernel.org>",
        "List-Unsubscribe": "<mailto:linux-tegra+unsubscribe@vger.kernel.org>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-NV-OnPremToCloud": "ExternallySecured",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BL02EPF00021F6A:EE_|PH7PR12MB7257:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "efb91bcb-910c-47ef-cad7-08de6efe4878",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "\n\tBCL:0;ARA:13230040|1800799024|376014|36860700013|82310400026;",
        "X-Microsoft-Antispam-Message-Info": "\n 0xgFQwSxunCSLw1owAl1YugUi/LqlM1OCRrFLFW+QmjarS/EvOVqyjy8jQrtOCtU9SspDiMowWIkdefYyK36NDaYTj6oOITQ4i+hpxNSRp/rDKoDKaDNifEh9a4VGUMpKC+4ecjBGI1PTFiYX9q3gPXmswxet2nnYlu4Tp0fKoLLAm7GgF2mvX8JZUTIGbHoSNiwXbVhn8tiiYfapIw271m3qtUY2tHZ0upL7yGpPe24LH1xQsk1k31ja+fFOxqo8i5YCqLC1weWOQ/ZCuCFibfgVyzH2kGNrxczcsKRbkRYEukoP1O8d6LajKvWs0QG956OLbLqhGsXxA13YBept+dyu5AgVlP4D57tF63fD7onMpvDzsgiiAzfLdYHOc9HzU9DTGf6/ANeXQAoCG4YGLJnGmyKPD/KyfHC4BfzopOKR91H/KU5RAjaHK1D34cM2Psb79G3e5D85cu4eHa2bxOTGZEzpd2SAL1HU/ebyyGtCNioTJWpb44YIqRsgcJlu2/onQKjqRgBsayWJELs/tjPiYKcH3hKRrHXq27Vk4yEBgqbDKCwOtjd+lOuT+IITGJanJjRvdnbFYGUN2SKXMViiXlZIy2+LsN+lFEzlM9bgUCa/hzHp4nTz3Jmn7PvuT5CL53oELnXMJmzuToycg32wwT+xcoQkor8VcaakexZEvA/bYY0eS0DEz8JF5h6J8MhSlEK9y70gyAHfQzqT8dwY5Owb9T0xKFr54qffSsaxPIDOtH98UklfRomVRBnv5eZ0+Ik3oikmaLu3N30rkLfK0vFSMjTTlqMmXAkpbpTg0X0wMsYv2bpeIbfyviI1rYQZKnJYAFhvhXCAnpwPinN9oQgBimBo7auDnmTc2hTdMEKGB/1W/fLE6ueDcHFaRwWu3R7WTrirapkDdtYN9ASCymB9NEaX+4RhNqeDujpguROzCuJ5u/ubiogvsNUWcd1d+lNJO9a9MtOwV/IZbVXySVAnqdQ02nQa30wiHYKrzCP7as+DVMIyJyQ+EhckWhdHhjd1J9V72Ay8gXffEJBeJ8HncCM725B9NWuPm4I7a0Yiw0LWU1iKFeVr48UeD65zrIQhNshqhWXdChCgHsSZvWpzrSXeqPxJVkeWsbeEUosJzIzyNxID6fOxT4LqkDp1bJWw0/MTHRpoHCiOkSMHHZy/41BCe5MyJVFn96q5XhRZSYDG+Gv3aJIUjURLl+5xBr6mxjUeF+jGW5Afn3DxuIoUyV0/zXkkB2mX57C7fjdaqRfgGRsZP55Yt7CV4xBUdbNWM39fOjKR/q3gr5K8bu0FXAonpmeXNzabgBDSIddII91qJ7Gqjc6CxYoj9N4FDyO4XHnsu2LOx4Rn7yzt1MT2O7JGlNGKT8gTmVQ4Q5r49B8HVj/1Ly8LNSVfvsfubBo4fIormBQsVZ8BGWLUIgbXqdmhPzu0A5cw0BOTNpM2yyVX10O2c4MUUrsTPGYLsPfKvNJcZC8KUUYs+tK9jKGnkWpd83yB5wnoH1Q9UZ3m1M23QxzbihA5erkNAub466DzYIngv3a4qZjWZ42LZJUwEhJsmErB9Py89Zz1irLe5weMTnWJ8PbAQ2zJheXWHVK6xc9bqBtg8PM4sXMOyhICdI9WDoRFA1JcnpfpuDPeptfKc9B7fGXHXf8sWloTG/TSXnU0FSZrlBnsg==",
        "X-Forefront-Antispam-Report": "\n\tCIP:216.228.117.161;CTRY:US;LANG:en;SCL:1;SRV:;IPV:NLI;SFV:NSPM;H:mail.nvidia.com;PTR:dc6edge2.nvidia.com;CAT:NONE;SFS:(13230040)(1800799024)(376014)(36860700013)(82310400026);DIR:OUT;SFP:1101;",
        "X-MS-Exchange-AntiSpam-MessageData-ChunkCount": "1",
        "X-MS-Exchange-AntiSpam-MessageData-0": "\n\tbEv/h1fCH31jTszf3LExfA9DV4bNlwv09RquTlnYRGv6LdibZDVavCZRcUOzIEcsfQ0KcqXxoVPjuBF/lidLQKh0jMlK7S8pFN4xVJduAZymBwU4kVSpQPbJu59NYYcTeprLfYL1CnpO++fNRo2EBPa82yVc5W/5ztCpMxLM/VgOC6Enp9o0DnzX4XF1nwn+C/YjYH8vvQ13I1PLdDL1OCo5mFdGx74nWMVhbZ4UVdgdtnLxJpOQOyE/VKD+7yeo8TMiC3e5puYB+4D1IgYH0OJxde7Pocrj7wdgpSbvr0LuZ6l9ozUJ55lOoSu5zjVG5RKo9D4+7yzQ+Ucy/p7wMVW0oLZ1NYorNbPgDoll0quK67xZjJXoqGUepTkwub4knHtDRhBk9S+vi0+Zx4fqc+8nMhcdhM1kVX4OYYl+Hi9+oBxz29p1EXTB7xOEPyF6",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Feb 2026 14:59:14.7908\n (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n efb91bcb-910c-47ef-cad7-08de6efe4878",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a;Ip=[216.228.117.161];Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n\tBL02EPF00021F6A.namprd02.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH7PR12MB7257"
    },
    "content": "Adds NVIDIA C2C PMU support in Tegra410 SOC. This PMU is\nused to measure memory latency between the SOC and device\nmemory, e.g GPU Memory (GMEM), CXL Memory, or memory on\nremote Tegra410 SOC.\n\nReviewed-by: Ilkka Koskinen <ilkka@os.amperecomputing.com>\nSigned-off-by: Besar Wicaksono <bwicaksono@nvidia.com>\n---\n .../admin-guide/perf/nvidia-tegra410-pmu.rst  |  151 +++\n drivers/perf/Kconfig                          |    7 +\n drivers/perf/Makefile                         |    1 +\n drivers/perf/nvidia_t410_c2c_pmu.c            | 1062 +++++++++++++++++\n 4 files changed, 1221 insertions(+)\n create mode 100644 drivers/perf/nvidia_t410_c2c_pmu.c",
    "diff": "diff --git a/Documentation/admin-guide/perf/nvidia-tegra410-pmu.rst b/Documentation/admin-guide/perf/nvidia-tegra410-pmu.rst\nindex c8fbc289d12c..678cb3df228e 100644\n--- a/Documentation/admin-guide/perf/nvidia-tegra410-pmu.rst\n+++ b/Documentation/admin-guide/perf/nvidia-tegra410-pmu.rst\n@@ -9,6 +9,9 @@ metrics like memory bandwidth, latency, and utilization:\n * PCIE\n * PCIE-TGT\n * CPU Memory (CMEM) Latency\n+* NVLink-C2C\n+* NV-CLink\n+* NV-DLink\n \n PMU Driver\n ----------\n@@ -367,3 +370,151 @@ see /sys/bus/event_source/devices/nvidia_cmem_latency_pmu_<socket-id>.\n Example usage::\n \n   perf stat -a -e '{nvidia_cmem_latency_pmu_0/rd_req/,nvidia_cmem_latency_pmu_0/rd_cum_outs/,nvidia_cmem_latency_pmu_0/cycles/}'\n+\n+NVLink-C2C PMU\n+--------------\n+\n+This PMU monitors latency events of memory read/write requests that pass through\n+the NVIDIA Chip-to-Chip (C2C) interface. Bandwidth events are not available\n+in this PMU, unlike the C2C PMU in Grace (Tegra241 SoC).\n+\n+The events and configuration options of this PMU device are available in sysfs,\n+see /sys/bus/event_source/devices/nvidia_nvlink_c2c_pmu_<socket-id>.\n+\n+The list of events:\n+\n+  * IN_RD_CUM_OUTS: accumulated outstanding request (in cycles) of incoming read requests.\n+  * IN_RD_REQ: the number of incoming read requests.\n+  * IN_WR_CUM_OUTS: accumulated outstanding request (in cycles) of incoming write requests.\n+  * IN_WR_REQ: the number of incoming write requests.\n+  * OUT_RD_CUM_OUTS: accumulated outstanding request (in cycles) of outgoing read requests.\n+  * OUT_RD_REQ: the number of outgoing read requests.\n+  * OUT_WR_CUM_OUTS: accumulated outstanding request (in cycles) of outgoing write requests.\n+  * OUT_WR_REQ: the number of outgoing write requests.\n+  * CYCLES: NVLink-C2C interface cycle counts.\n+\n+The incoming events count the reads/writes from remote device to the SoC.\n+The outgoing events count the reads/writes from the SoC to remote 
device.\n+\n+The sysfs /sys/bus/event_source/devices/nvidia_nvlink_c2c_pmu_<socket-id>/peer\n+contains the information about the connected device.\n+\n+When the C2C interface is connected to GPU(s), the user can use the\n+\"gpu_mask\" parameter to filter traffic to/from specific GPU(s). Each bit represents the GPU\n+index, e.g. \"gpu_mask=0x1\" corresponds to GPU 0 and \"gpu_mask=0x3\" is for GPU 0 and 1.\n+The PMU will monitor all GPUs by default if not specified.\n+\n+When connected to another SoC, only the read events are available.\n+\n+The events can be used to calculate the average latency of the read/write requests::\n+\n+   C2C_FREQ_IN_GHZ = CYCLES / ELAPSED_TIME_IN_NS\n+\n+   IN_RD_AVG_LATENCY_IN_CYCLES = IN_RD_CUM_OUTS / IN_RD_REQ\n+   IN_RD_AVG_LATENCY_IN_NS = IN_RD_AVG_LATENCY_IN_CYCLES / C2C_FREQ_IN_GHZ\n+\n+   IN_WR_AVG_LATENCY_IN_CYCLES = IN_WR_CUM_OUTS / IN_WR_REQ\n+   IN_WR_AVG_LATENCY_IN_NS = IN_WR_AVG_LATENCY_IN_CYCLES / C2C_FREQ_IN_GHZ\n+\n+   OUT_RD_AVG_LATENCY_IN_CYCLES = OUT_RD_CUM_OUTS / OUT_RD_REQ\n+   OUT_RD_AVG_LATENCY_IN_NS = OUT_RD_AVG_LATENCY_IN_CYCLES / C2C_FREQ_IN_GHZ\n+\n+   OUT_WR_AVG_LATENCY_IN_CYCLES = OUT_WR_CUM_OUTS / OUT_WR_REQ\n+   OUT_WR_AVG_LATENCY_IN_NS = OUT_WR_AVG_LATENCY_IN_CYCLES / C2C_FREQ_IN_GHZ\n+\n+Example usage:\n+\n+  * Count incoming traffic from all GPUs connected via NVLink-C2C::\n+\n+      perf stat -a -e nvidia_nvlink_c2c_pmu_0/in_rd_req/\n+\n+  * Count incoming traffic from GPU 0 connected via NVLink-C2C::\n+\n+      perf stat -a -e nvidia_nvlink_c2c_pmu_0/in_rd_cum_outs,gpu_mask=0x1/\n+\n+  * Count incoming traffic from GPU 1 connected via NVLink-C2C::\n+\n+      perf stat -a -e nvidia_nvlink_c2c_pmu_0/in_rd_cum_outs,gpu_mask=0x2/\n+\n+  * Count outgoing traffic to all GPUs connected via NVLink-C2C::\n+\n+      perf stat -a -e nvidia_nvlink_c2c_pmu_0/out_rd_req/\n+\n+  * Count outgoing traffic to GPU 0 connected via NVLink-C2C::\n+\n+      perf stat -a -e 
nvidia_nvlink_c2c_pmu_0/out_rd_cum_outs,gpu_mask=0x1/\n+\n+  * Count outgoing traffic to GPU 1 connected via NVLink-C2C::\n+\n+      perf stat -a -e nvidia_nvlink_c2c_pmu_0/out_rd_cum_outs,gpu_mask=0x2/\n+\n+NV-CLink PMU\n+------------\n+\n+This PMU monitors latency events of memory read requests that pass through\n+the NV-CLINK interface. Bandwidth events are not available in this PMU.\n+In Tegra410 SoC, the NV-CLink interface is used to connect to another Tegra410\n+SoC and this PMU only counts read traffic.\n+\n+The events and configuration options of this PMU device are available in sysfs,\n+see /sys/bus/event_source/devices/nvidia_nvclink_pmu_<socket-id>.\n+\n+The list of events:\n+\n+  * IN_RD_CUM_OUTS: accumulated outstanding request (in cycles) of incoming read requests.\n+  * IN_RD_REQ: the number of incoming read requests.\n+  * OUT_RD_CUM_OUTS: accumulated outstanding request (in cycles) of outgoing read requests.\n+  * OUT_RD_REQ: the number of outgoing read requests.\n+  * CYCLES: NV-CLINK interface cycle counts.\n+\n+The incoming events count the reads from remote device to the SoC.\n+The outgoing events count the reads from the SoC to remote device.\n+\n+The events can be used to calculate the average latency of the read requests::\n+\n+   CLINK_FREQ_IN_GHZ = CYCLES / ELAPSED_TIME_IN_NS\n+\n+   IN_RD_AVG_LATENCY_IN_CYCLES = IN_RD_CUM_OUTS / IN_RD_REQ\n+   IN_RD_AVG_LATENCY_IN_NS = IN_RD_AVG_LATENCY_IN_CYCLES / CLINK_FREQ_IN_GHZ\n+\n+   OUT_RD_AVG_LATENCY_IN_CYCLES = OUT_RD_CUM_OUTS / OUT_RD_REQ\n+   OUT_RD_AVG_LATENCY_IN_NS = OUT_RD_AVG_LATENCY_IN_CYCLES / CLINK_FREQ_IN_GHZ\n+\n+Example usage:\n+\n+  * Count incoming read traffic from remote SoC connected via NV-CLINK::\n+\n+      perf stat -a -e nvidia_nvclink_pmu_0/in_rd_req/\n+\n+  * Count outgoing read traffic to remote SoC connected via NV-CLINK::\n+\n+      perf stat -a -e nvidia_nvclink_pmu_0/out_rd_req/\n+\n+NV-DLink PMU\n+------------\n+\n+This PMU monitors latency events of memory read 
requests that pass through\n+the NV-DLINK interface.  Bandwidth events are not available in this PMU.\n+In Tegra410 SoC, this PMU only counts CXL memory read traffic.\n+\n+The events and configuration options of this PMU device are available in sysfs,\n+see /sys/bus/event_source/devices/nvidia_nvdlink_pmu_<socket-id>.\n+\n+The list of events:\n+\n+  * IN_RD_CUM_OUTS: accumulated outstanding read requests (in cycles) to CXL memory.\n+  * IN_RD_REQ: the number of read requests to CXL memory.\n+  * CYCLES: NV-DLINK interface cycle counts.\n+\n+The events can be used to calculate the average latency of the read requests::\n+\n+   DLINK_FREQ_IN_GHZ = CYCLES / ELAPSED_TIME_IN_NS\n+\n+   IN_RD_AVG_LATENCY_IN_CYCLES = IN_RD_CUM_OUTS / IN_RD_REQ\n+   IN_RD_AVG_LATENCY_IN_NS = IN_RD_AVG_LATENCY_IN_CYCLES / DLINK_FREQ_IN_GHZ\n+\n+Example usage:\n+\n+  * Count read events to CXL memory::\n+\n+      perf stat -a -e '{nvidia_nvdlink_pmu_0/in_rd_req/,nvidia_nvdlink_pmu_0/in_rd_cum_outs/}'\ndiff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig\nindex 26e86067d8f9..ab90932fc2d0 100644\n--- a/drivers/perf/Kconfig\n+++ b/drivers/perf/Kconfig\n@@ -318,4 +318,11 @@ config NVIDIA_TEGRA410_CMEM_LATENCY_PMU\n \t  Enable perf support for CPU memory latency counters monitoring on\n \t  NVIDIA Tegra410 SoC.\n \n+config NVIDIA_TEGRA410_C2C_PMU\n+\ttristate \"NVIDIA Tegra410 C2C PMU\"\n+\tdepends on ARM64 && ACPI\n+\thelp\n+\t  Enable perf support for counters in NVIDIA C2C interface of NVIDIA\n+\t  Tegra410 SoC.\n+\n endmenu\ndiff --git a/drivers/perf/Makefile b/drivers/perf/Makefile\nindex 4aa6aad393c2..eb8a022dad9a 100644\n--- a/drivers/perf/Makefile\n+++ b/drivers/perf/Makefile\n@@ -36,3 +36,4 @@ obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/\n obj-$(CONFIG_MESON_DDR_PMU) += amlogic/\n obj-$(CONFIG_CXL_PMU) += cxl_pmu.o\n obj-$(CONFIG_NVIDIA_TEGRA410_CMEM_LATENCY_PMU) += nvidia_t410_cmem_latency_pmu.o\n+obj-$(CONFIG_NVIDIA_TEGRA410_C2C_PMU) += nvidia_t410_c2c_pmu.o\ndiff 
--git a/drivers/perf/nvidia_t410_c2c_pmu.c b/drivers/perf/nvidia_t410_c2c_pmu.c\nnew file mode 100644\nindex 000000000000..a3891c94dcde\n--- /dev/null\n+++ b/drivers/perf/nvidia_t410_c2c_pmu.c\n@@ -0,0 +1,1062 @@\n+// SPDX-License-Identifier: GPL-2.0\n+/*\n+ * NVIDIA Tegra410 C2C PMU driver.\n+ *\n+ * Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n+ */\n+\n+#include <linux/acpi.h>\n+#include <linux/bitops.h>\n+#include <linux/cpumask.h>\n+#include <linux/device.h>\n+#include <linux/interrupt.h>\n+#include <linux/io.h>\n+#include <linux/module.h>\n+#include <linux/perf_event.h>\n+#include <linux/platform_device.h>\n+#include <linux/property.h>\n+\n+/* The C2C interface types in Tegra410. */\n+#define C2C_TYPE_NVLINK          0x0\n+#define C2C_TYPE_NVCLINK         0x1\n+#define C2C_TYPE_NVDLINK         0x2\n+#define C2C_TYPE_COUNT           0x3\n+\n+/* The type of the peer device connected to the C2C interface. */\n+#define C2C_PEER_TYPE_CPU        0x0\n+#define C2C_PEER_TYPE_GPU        0x1\n+#define C2C_PEER_TYPE_CXLMEM     0x2\n+#define C2C_PEER_TYPE_COUNT      0x3\n+\n+/* The number of peer devices can be connected to the C2C interface. */\n+#define C2C_NR_PEER_CPU          0x1\n+#define C2C_NR_PEER_GPU          0x2\n+#define C2C_NR_PEER_CXLMEM       0x1\n+#define C2C_NR_PEER_MAX          0x2\n+\n+/* Number of instances on each interface. */\n+#define C2C_NR_INST_NVLINK       14\n+#define C2C_NR_INST_NVCLINK      12\n+#define C2C_NR_INST_NVDLINK      16\n+#define C2C_NR_INST_MAX          16\n+\n+/* Register offsets. 
*/\n+#define C2C_CTRL                    0x864\n+#define C2C_IN_STATUS               0x868\n+#define C2C_CYCLE_CNTR              0x86c\n+#define C2C_IN_RD_CUM_OUTS_CNTR     0x874\n+#define C2C_IN_RD_REQ_CNTR          0x87c\n+#define C2C_IN_WR_CUM_OUTS_CNTR     0x884\n+#define C2C_IN_WR_REQ_CNTR          0x88c\n+#define C2C_OUT_STATUS              0x890\n+#define C2C_OUT_RD_CUM_OUTS_CNTR    0x898\n+#define C2C_OUT_RD_REQ_CNTR         0x8a0\n+#define C2C_OUT_WR_CUM_OUTS_CNTR    0x8a8\n+#define C2C_OUT_WR_REQ_CNTR         0x8b0\n+\n+/* C2C_IN_STATUS register field. */\n+#define C2C_IN_STATUS_CYCLE_OVF             BIT(0)\n+#define C2C_IN_STATUS_IN_RD_CUM_OUTS_OVF    BIT(1)\n+#define C2C_IN_STATUS_IN_RD_REQ_OVF         BIT(2)\n+#define C2C_IN_STATUS_IN_WR_CUM_OUTS_OVF    BIT(3)\n+#define C2C_IN_STATUS_IN_WR_REQ_OVF         BIT(4)\n+\n+/* C2C_OUT_STATUS register field. */\n+#define C2C_OUT_STATUS_OUT_RD_CUM_OUTS_OVF    BIT(0)\n+#define C2C_OUT_STATUS_OUT_RD_REQ_OVF         BIT(1)\n+#define C2C_OUT_STATUS_OUT_WR_CUM_OUTS_OVF    BIT(2)\n+#define C2C_OUT_STATUS_OUT_WR_REQ_OVF         BIT(3)\n+\n+/* Events. */\n+#define C2C_EVENT_CYCLES                0x0\n+#define C2C_EVENT_IN_RD_CUM_OUTS        0x1\n+#define C2C_EVENT_IN_RD_REQ             0x2\n+#define C2C_EVENT_IN_WR_CUM_OUTS        0x3\n+#define C2C_EVENT_IN_WR_REQ             0x4\n+#define C2C_EVENT_OUT_RD_CUM_OUTS       0x5\n+#define C2C_EVENT_OUT_RD_REQ            0x6\n+#define C2C_EVENT_OUT_WR_CUM_OUTS       0x7\n+#define C2C_EVENT_OUT_WR_REQ            0x8\n+\n+#define C2C_NUM_EVENTS           0x9\n+#define C2C_MASK_EVENT           0xFF\n+#define C2C_MAX_ACTIVE_EVENTS    32\n+\n+#define C2C_ACTIVE_CPU_MASK        0x0\n+#define C2C_ASSOCIATED_CPU_MASK    0x1\n+\n+/*\n+ * Maximum poll count for reading counter value using high-low-high sequence.\n+ */\n+#define HILOHI_MAX_POLL    1000\n+\n+static unsigned long nv_c2c_pmu_cpuhp_state;\n+\n+/* PMU descriptor. 
*/\n+\n+/* Tracks the events assigned to the PMU for a given logical index. */\n+struct nv_c2c_pmu_hw_events {\n+\t/* The events that are active. */\n+\tstruct perf_event *events[C2C_MAX_ACTIVE_EVENTS];\n+\n+\t/*\n+\t * Each bit indicates a logical counter is being used (or not) for an\n+\t * event.\n+\t */\n+\tDECLARE_BITMAP(used_ctrs, C2C_MAX_ACTIVE_EVENTS);\n+};\n+\n+struct nv_c2c_pmu {\n+\tstruct pmu pmu;\n+\tstruct device *dev;\n+\tstruct acpi_device *acpi_dev;\n+\n+\tconst char *name;\n+\tconst char *identifier;\n+\n+\tunsigned int c2c_type;\n+\tunsigned int peer_type;\n+\tunsigned int socket;\n+\tunsigned int nr_inst;\n+\tunsigned int nr_peer;\n+\tunsigned long peer_insts[C2C_NR_PEER_MAX][BITS_TO_LONGS(C2C_NR_INST_MAX)];\n+\tu32 filter_default;\n+\n+\tstruct nv_c2c_pmu_hw_events hw_events;\n+\n+\tcpumask_t associated_cpus;\n+\tcpumask_t active_cpu;\n+\n+\tstruct hlist_node cpuhp_node;\n+\n+\tstruct attribute **formats;\n+\tconst struct attribute_group *attr_groups[6];\n+\n+\tvoid __iomem *base_broadcast;\n+\tvoid __iomem *base[C2C_NR_INST_MAX];\n+};\n+\n+#define to_c2c_pmu(p) (container_of(p, struct nv_c2c_pmu, pmu))\n+\n+/* Get event type from perf_event. */\n+static inline u32 get_event_type(struct perf_event *event)\n+{\n+\treturn (event->attr.config) & C2C_MASK_EVENT;\n+}\n+\n+static inline u32 get_filter_mask(struct perf_event *event)\n+{\n+\tu32 filter;\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(event->pmu);\n+\n+\tfilter = ((u32)event->attr.config1) & c2c_pmu->filter_default;\n+\tif (filter == 0)\n+\t\tfilter = c2c_pmu->filter_default;\n+\n+\treturn filter;\n+}\n+\n+/* PMU operations. 
*/\n+\n+static int nv_c2c_pmu_get_event_idx(struct nv_c2c_pmu_hw_events *hw_events,\n+\t\t\t\t    struct perf_event *event)\n+{\n+\tu32 idx;\n+\n+\tidx = find_first_zero_bit(hw_events->used_ctrs, C2C_MAX_ACTIVE_EVENTS);\n+\tif (idx >= C2C_MAX_ACTIVE_EVENTS)\n+\t\treturn -EAGAIN;\n+\n+\tset_bit(idx, hw_events->used_ctrs);\n+\n+\treturn idx;\n+}\n+\n+static bool\n+nv_c2c_pmu_validate_event(struct pmu *pmu,\n+\t\t\t  struct nv_c2c_pmu_hw_events *hw_events,\n+\t\t\t  struct perf_event *event)\n+{\n+\tif (is_software_event(event))\n+\t\treturn true;\n+\n+\t/* Reject groups spanning multiple HW PMUs. */\n+\tif (event->pmu != pmu)\n+\t\treturn false;\n+\n+\treturn nv_c2c_pmu_get_event_idx(hw_events, event) >= 0;\n+}\n+\n+/*\n+ * Make sure the group of events can be scheduled at once\n+ * on the PMU.\n+ */\n+static bool nv_c2c_pmu_validate_group(struct perf_event *event)\n+{\n+\tstruct perf_event *sibling, *leader = event->group_leader;\n+\tstruct nv_c2c_pmu_hw_events fake_hw_events;\n+\n+\tif (event->group_leader == event)\n+\t\treturn true;\n+\n+\tmemset(&fake_hw_events, 0, sizeof(fake_hw_events));\n+\n+\tif (!nv_c2c_pmu_validate_event(event->pmu, &fake_hw_events, leader))\n+\t\treturn false;\n+\n+\tfor_each_sibling_event(sibling, leader) {\n+\t\tif (!nv_c2c_pmu_validate_event(event->pmu, &fake_hw_events,\n+\t\t\t\t\t       sibling))\n+\t\t\treturn false;\n+\t}\n+\n+\treturn nv_c2c_pmu_validate_event(event->pmu, &fake_hw_events, event);\n+}\n+\n+static int nv_c2c_pmu_event_init(struct perf_event *event)\n+{\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(event->pmu);\n+\tstruct hw_perf_event *hwc = &event->hw;\n+\tu32 event_type = get_event_type(event);\n+\n+\tif (event->attr.type != event->pmu->type ||\n+\t    event_type >= C2C_NUM_EVENTS)\n+\t\treturn -ENOENT;\n+\n+\t/*\n+\t * Following other \"uncore\" PMUs, we do not support sampling mode or\n+\t * attach to a task (per-process mode).\n+\t */\n+\tif (is_sampling_event(event)) {\n+\t\tdev_dbg(c2c_pmu->pmu.dev, \"Can't 
support sampling events\\n\");\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\tif (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {\n+\t\tdev_dbg(c2c_pmu->pmu.dev, \"Can't support per-task counters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * Make sure the CPU assignment is on one of the CPUs associated with\n+\t * this PMU.\n+\t */\n+\tif (!cpumask_test_cpu(event->cpu, &c2c_pmu->associated_cpus)) {\n+\t\tdev_dbg(c2c_pmu->pmu.dev,\n+\t\t\t\"Requested cpu is not associated with the PMU\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Enforce the current active CPU to handle the events in this PMU. */\n+\tevent->cpu = cpumask_first(&c2c_pmu->active_cpu);\n+\tif (event->cpu >= nr_cpu_ids)\n+\t\treturn -EINVAL;\n+\n+\tif (!nv_c2c_pmu_validate_group(event))\n+\t\treturn -EINVAL;\n+\n+\thwc->idx = -1;\n+\thwc->config = event_type;\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Read 64-bit register as a pair of 32-bit registers using hi-lo-hi sequence.\n+ */\n+static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)\n+{\n+\tu32 val_lo, val_hi;\n+\tu64 val;\n+\n+\t/* Use high-low-high sequence to avoid tearing */\n+\tdo {\n+\t\tif (max_poll_count-- == 0) {\n+\t\t\tpr_err(\"NV C2C PMU: timeout hi-low-high sequence\\n\");\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\tval_hi = readl(addr + 4);\n+\t\tval_lo = readl(addr);\n+\t} while (val_hi != readl(addr + 4));\n+\n+\tval = (((u64)val_hi << 32) | val_lo);\n+\n+\treturn val;\n+}\n+\n+static void nv_c2c_pmu_check_status(struct nv_c2c_pmu *c2c_pmu, u32 instance)\n+{\n+\tu32 in_status, out_status;\n+\n+\tin_status = readl(c2c_pmu->base[instance] + C2C_IN_STATUS);\n+\tout_status = readl(c2c_pmu->base[instance] + C2C_OUT_STATUS);\n+\n+\tif (in_status || out_status)\n+\t\tdev_warn(c2c_pmu->dev,\n+\t\t\t\"C2C PMU overflow in: 0x%x, out: 0x%x\\n\",\n+\t\t\tin_status, out_status);\n+}\n+\n+static u32 nv_c2c_ctr_offset[C2C_NUM_EVENTS] = {\n+\t[C2C_EVENT_CYCLES] = C2C_CYCLE_CNTR,\n+\t[C2C_EVENT_IN_RD_CUM_OUTS] = 
C2C_IN_RD_CUM_OUTS_CNTR,\n+\t[C2C_EVENT_IN_RD_REQ] = C2C_IN_RD_REQ_CNTR,\n+\t[C2C_EVENT_IN_WR_CUM_OUTS] = C2C_IN_WR_CUM_OUTS_CNTR,\n+\t[C2C_EVENT_IN_WR_REQ] = C2C_IN_WR_REQ_CNTR,\n+\t[C2C_EVENT_OUT_RD_CUM_OUTS] = C2C_OUT_RD_CUM_OUTS_CNTR,\n+\t[C2C_EVENT_OUT_RD_REQ] = C2C_OUT_RD_REQ_CNTR,\n+\t[C2C_EVENT_OUT_WR_CUM_OUTS] = C2C_OUT_WR_CUM_OUTS_CNTR,\n+\t[C2C_EVENT_OUT_WR_REQ] = C2C_OUT_WR_REQ_CNTR,\n+};\n+\n+static u64 nv_c2c_pmu_read_counter(struct perf_event *event)\n+{\n+\tu32 ctr_id, ctr_offset, filter_mask, filter_idx, inst_idx;\n+\tunsigned long *inst_mask;\n+\tDECLARE_BITMAP(filter_bitmap, C2C_NR_PEER_MAX);\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(event->pmu);\n+\tu64 val = 0;\n+\n+\tfilter_mask = get_filter_mask(event);\n+\tbitmap_from_arr32(filter_bitmap, &filter_mask, c2c_pmu->nr_peer);\n+\n+\tctr_id = event->hw.config;\n+\tctr_offset = nv_c2c_ctr_offset[ctr_id];\n+\n+\tfor_each_set_bit(filter_idx, filter_bitmap, c2c_pmu->nr_peer) {\n+\t\tinst_mask = c2c_pmu->peer_insts[filter_idx];\n+\t\tfor_each_set_bit(inst_idx, inst_mask, c2c_pmu->nr_inst) {\n+\t\t\tnv_c2c_pmu_check_status(c2c_pmu, inst_idx);\n+\n+\t\t\t/*\n+\t\t\t * Each instance share same clock and the driver always\n+\t\t\t * enables all instances. 
So we can use the counts from\n+\t\t\t * one instance for cycle counter.\n+\t\t\t */\n+\t\t\tif (ctr_id == C2C_EVENT_CYCLES)\n+\t\t\t\treturn read_reg64_hilohi(\n+\t\t\t\t\tc2c_pmu->base[inst_idx] + ctr_offset,\n+\t\t\t\t\tHILOHI_MAX_POLL);\n+\n+\t\t\t/*\n+\t\t\t * For other events, sum up the counts from all instances.\n+\t\t\t */\n+\t\t\tval += read_reg64_hilohi(\n+\t\t\t\tc2c_pmu->base[inst_idx] + ctr_offset,\n+\t\t\t\tHILOHI_MAX_POLL);\n+\t\t}\n+\t}\n+\n+\treturn val;\n+}\n+\n+static void nv_c2c_pmu_event_update(struct perf_event *event)\n+{\n+\tstruct hw_perf_event *hwc = &event->hw;\n+\tu64 prev, now;\n+\n+\tdo {\n+\t\tprev = local64_read(&hwc->prev_count);\n+\t\tnow = nv_c2c_pmu_read_counter(event);\n+\t} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);\n+\n+\tlocal64_add(now - prev, &event->count);\n+}\n+\n+static void nv_c2c_pmu_start(struct perf_event *event, int pmu_flags)\n+{\n+\tevent->hw.state = 0;\n+}\n+\n+static void nv_c2c_pmu_stop(struct perf_event *event, int pmu_flags)\n+{\n+\tevent->hw.state |= PERF_HES_STOPPED;\n+}\n+\n+static int nv_c2c_pmu_add(struct perf_event *event, int flags)\n+{\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(event->pmu);\n+\tstruct nv_c2c_pmu_hw_events *hw_events = &c2c_pmu->hw_events;\n+\tstruct hw_perf_event *hwc = &event->hw;\n+\tint idx;\n+\n+\tif (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),\n+\t\t\t\t\t   &c2c_pmu->associated_cpus)))\n+\t\treturn -ENOENT;\n+\n+\tidx = nv_c2c_pmu_get_event_idx(hw_events, event);\n+\tif (idx < 0)\n+\t\treturn idx;\n+\n+\thw_events->events[idx] = event;\n+\thwc->idx = idx;\n+\thwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;\n+\n+\tif (flags & PERF_EF_START)\n+\t\tnv_c2c_pmu_start(event, PERF_EF_RELOAD);\n+\n+\t/* Propagate changes to the userspace mapping. 
*/\n+\tperf_event_update_userpage(event);\n+\n+\treturn 0;\n+}\n+\n+static void nv_c2c_pmu_del(struct perf_event *event, int flags)\n+{\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(event->pmu);\n+\tstruct nv_c2c_pmu_hw_events *hw_events = &c2c_pmu->hw_events;\n+\tstruct hw_perf_event *hwc = &event->hw;\n+\tint idx = hwc->idx;\n+\n+\tnv_c2c_pmu_stop(event, PERF_EF_UPDATE);\n+\n+\thw_events->events[idx] = NULL;\n+\n+\tclear_bit(idx, hw_events->used_ctrs);\n+\n+\tperf_event_update_userpage(event);\n+}\n+\n+static void nv_c2c_pmu_read(struct perf_event *event)\n+{\n+\tnv_c2c_pmu_event_update(event);\n+}\n+\n+static void nv_c2c_pmu_enable(struct pmu *pmu)\n+{\n+\tvoid __iomem *bcast;\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(pmu);\n+\n+\t/* Check if any filter is enabled. */\n+\tif (bitmap_empty(c2c_pmu->hw_events.used_ctrs, C2C_MAX_ACTIVE_EVENTS))\n+\t\treturn;\n+\n+\t/* Enable all the counters. */\n+\tbcast = c2c_pmu->base_broadcast;\n+\twritel(0x1UL, bcast + C2C_CTRL);\n+}\n+\n+static void nv_c2c_pmu_disable(struct pmu *pmu)\n+{\n+\tunsigned int idx;\n+\tvoid __iomem *bcast;\n+\tstruct perf_event *event;\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(pmu);\n+\n+\t/* Disable all the counters. */\n+\tbcast = c2c_pmu->base_broadcast;\n+\twritel(0x0UL, bcast + C2C_CTRL);\n+\n+\t/*\n+\t * The counters will start from 0 again on restart.\n+\t * Update the events immediately to avoid losing the counts.\n+\t */\n+\tfor_each_set_bit(idx, c2c_pmu->hw_events.used_ctrs,\n+\t\t\t C2C_MAX_ACTIVE_EVENTS) {\n+\t\tevent = c2c_pmu->hw_events.events[idx];\n+\n+\t\tif (!event)\n+\t\t\tcontinue;\n+\n+\t\tnv_c2c_pmu_event_update(event);\n+\n+\t\tlocal64_set(&event->hw.prev_count, 0ULL);\n+\t}\n+}\n+\n+/* PMU identifier attribute. 
*/\n+\n+static ssize_t nv_c2c_pmu_identifier_show(struct device *dev,\n+\t\t\t\t\t  struct device_attribute *attr,\n+\t\t\t\t\t  char *page)\n+{\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(dev_get_drvdata(dev));\n+\n+\treturn sysfs_emit(page, \"%s\\n\", c2c_pmu->identifier);\n+}\n+\n+static struct device_attribute nv_c2c_pmu_identifier_attr =\n+\t__ATTR(identifier, 0444, nv_c2c_pmu_identifier_show, NULL);\n+\n+static struct attribute *nv_c2c_pmu_identifier_attrs[] = {\n+\t&nv_c2c_pmu_identifier_attr.attr,\n+\tNULL,\n+};\n+\n+static struct attribute_group nv_c2c_pmu_identifier_attr_group = {\n+\t.attrs = nv_c2c_pmu_identifier_attrs,\n+};\n+\n+/* Peer attribute. */\n+\n+static ssize_t nv_c2c_pmu_peer_show(struct device *dev,\n+\tstruct device_attribute *attr,\n+\tchar *page)\n+{\n+\tconst char *peer_type[C2C_PEER_TYPE_COUNT] = {\n+\t\t[C2C_PEER_TYPE_CPU] = \"cpu\",\n+\t\t[C2C_PEER_TYPE_GPU] = \"gpu\",\n+\t\t[C2C_PEER_TYPE_CXLMEM] = \"cxlmem\",\n+\t};\n+\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(dev_get_drvdata(dev));\n+\treturn sysfs_emit(page, \"nr_%s=%u\\n\", peer_type[c2c_pmu->peer_type],\n+\t\tc2c_pmu->nr_peer);\n+}\n+\n+static struct device_attribute nv_c2c_pmu_peer_attr =\n+\t__ATTR(peer, 0444, nv_c2c_pmu_peer_show, NULL);\n+\n+static struct attribute *nv_c2c_pmu_peer_attrs[] = {\n+\t&nv_c2c_pmu_peer_attr.attr,\n+\tNULL,\n+};\n+\n+static struct attribute_group nv_c2c_pmu_peer_attr_group = {\n+\t.attrs = nv_c2c_pmu_peer_attrs,\n+};\n+\n+/* Format attributes. 
*/\n+\n+#define NV_C2C_PMU_EXT_ATTR(_name, _func, _config)\t\t\t\\\n+\t(&((struct dev_ext_attribute[]){\t\t\t\t\\\n+\t\t{\t\t\t\t\t\t\t\\\n+\t\t\t.attr = __ATTR(_name, 0444, _func, NULL),\t\\\n+\t\t\t.var = (void *)_config\t\t\t\t\\\n+\t\t}\t\t\t\t\t\t\t\\\n+\t})[0].attr.attr)\n+\n+#define NV_C2C_PMU_FORMAT_ATTR(_name, _config) \\\n+\tNV_C2C_PMU_EXT_ATTR(_name, device_show_string, _config)\n+\n+#define NV_C2C_PMU_FORMAT_EVENT_ATTR \\\n+\tNV_C2C_PMU_FORMAT_ATTR(event, \"config:0-3\")\n+\n+static struct attribute *nv_c2c_nvlink_pmu_formats[] = {\n+\tNV_C2C_PMU_FORMAT_EVENT_ATTR,\n+\tNV_C2C_PMU_FORMAT_ATTR(gpu_mask, \"config1:0-1\"),\n+\tNULL,\n+};\n+\n+static struct attribute *nv_c2c_pmu_formats[] = {\n+\tNV_C2C_PMU_FORMAT_EVENT_ATTR,\n+\tNULL,\n+};\n+\n+static struct attribute_group *\n+nv_c2c_pmu_alloc_format_attr_group(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tstruct attribute_group *format_group;\n+\tstruct device *dev = c2c_pmu->dev;\n+\n+\tformat_group =\n+\t\tdevm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);\n+\tif (!format_group)\n+\t\treturn NULL;\n+\n+\tformat_group->name = \"format\";\n+\tformat_group->attrs = c2c_pmu->formats;\n+\n+\treturn format_group;\n+}\n+\n+/* Event attributes. 
*/\n+\n+static ssize_t nv_c2c_pmu_sysfs_event_show(struct device *dev,\n+\t\t\t\t\t   struct device_attribute *attr,\n+\t\t\t\t\t   char *buf)\n+{\n+\tstruct perf_pmu_events_attr *pmu_attr;\n+\n+\tpmu_attr = container_of(attr, typeof(*pmu_attr), attr);\n+\treturn sysfs_emit(buf, \"event=0x%llx\\n\", pmu_attr->id);\n+}\n+\n+#define NV_C2C_PMU_EVENT_ATTR(_name, _config)\t\\\n+\tPMU_EVENT_ATTR_ID(_name, nv_c2c_pmu_sysfs_event_show, _config)\n+\n+static struct attribute *nv_c2c_pmu_events[] = {\n+\tNV_C2C_PMU_EVENT_ATTR(cycles, C2C_EVENT_CYCLES),\n+\tNV_C2C_PMU_EVENT_ATTR(in_rd_cum_outs, C2C_EVENT_IN_RD_CUM_OUTS),\n+\tNV_C2C_PMU_EVENT_ATTR(in_rd_req, C2C_EVENT_IN_RD_REQ),\n+\tNV_C2C_PMU_EVENT_ATTR(in_wr_cum_outs, C2C_EVENT_IN_WR_CUM_OUTS),\n+\tNV_C2C_PMU_EVENT_ATTR(in_wr_req, C2C_EVENT_IN_WR_REQ),\n+\tNV_C2C_PMU_EVENT_ATTR(out_rd_cum_outs, C2C_EVENT_OUT_RD_CUM_OUTS),\n+\tNV_C2C_PMU_EVENT_ATTR(out_rd_req, C2C_EVENT_OUT_RD_REQ),\n+\tNV_C2C_PMU_EVENT_ATTR(out_wr_cum_outs, C2C_EVENT_OUT_WR_CUM_OUTS),\n+\tNV_C2C_PMU_EVENT_ATTR(out_wr_req, C2C_EVENT_OUT_WR_REQ),\n+\tNULL\n+};\n+\n+static umode_t\n+nv_c2c_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,\n+\t\t\t\t int unused)\n+{\n+\tstruct device *dev = kobj_to_dev(kobj);\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(dev_get_drvdata(dev));\n+\tstruct perf_pmu_events_attr *eattr;\n+\n+\teattr = container_of(attr, typeof(*eattr), attr.attr);\n+\n+\tif (c2c_pmu->c2c_type == C2C_TYPE_NVDLINK) {\n+\t\t/* Only incoming reads are available. */\n+\t\tswitch (eattr->id) {\n+\t\tcase C2C_EVENT_IN_WR_CUM_OUTS:\n+\t\tcase C2C_EVENT_IN_WR_REQ:\n+\t\tcase C2C_EVENT_OUT_RD_CUM_OUTS:\n+\t\tcase C2C_EVENT_OUT_RD_REQ:\n+\t\tcase C2C_EVENT_OUT_WR_CUM_OUTS:\n+\t\tcase C2C_EVENT_OUT_WR_REQ:\n+\t\t\treturn 0;\n+\t\tdefault:\n+\t\t\treturn attr->mode;\n+\t\t}\n+\t} else {\n+\t\t/* Hide the write events if C2C connected to another SoC. 
*/\n+\t\tif (c2c_pmu->peer_type == C2C_PEER_TYPE_CPU) {\n+\t\t\tswitch (eattr->id) {\n+\t\t\tcase C2C_EVENT_IN_WR_CUM_OUTS:\n+\t\t\tcase C2C_EVENT_IN_WR_REQ:\n+\t\t\tcase C2C_EVENT_OUT_WR_CUM_OUTS:\n+\t\t\tcase C2C_EVENT_OUT_WR_REQ:\n+\t\t\t\treturn 0;\n+\t\t\tdefault:\n+\t\t\t\treturn attr->mode;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn attr->mode;\n+}\n+\n+static const struct attribute_group nv_c2c_pmu_events_group = {\n+\t.name = \"events\",\n+\t.attrs = nv_c2c_pmu_events,\n+\t.is_visible = nv_c2c_pmu_event_attr_is_visible,\n+};\n+\n+/* Cpumask attributes. */\n+\n+static ssize_t nv_c2c_pmu_cpumask_show(struct device *dev,\n+\t\t\t\t       struct device_attribute *attr, char *buf)\n+{\n+\tstruct pmu *pmu = dev_get_drvdata(dev);\n+\tstruct nv_c2c_pmu *c2c_pmu = to_c2c_pmu(pmu);\n+\tstruct dev_ext_attribute *eattr =\n+\t\tcontainer_of(attr, struct dev_ext_attribute, attr);\n+\tunsigned long mask_id = (unsigned long)eattr->var;\n+\tconst cpumask_t *cpumask;\n+\n+\tswitch (mask_id) {\n+\tcase C2C_ACTIVE_CPU_MASK:\n+\t\tcpumask = &c2c_pmu->active_cpu;\n+\t\tbreak;\n+\tcase C2C_ASSOCIATED_CPU_MASK:\n+\t\tcpumask = &c2c_pmu->associated_cpus;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn 0;\n+\t}\n+\treturn cpumap_print_to_pagebuf(true, buf, cpumask);\n+}\n+\n+#define NV_C2C_PMU_CPUMASK_ATTR(_name, _config)\t\t\t\\\n+\tNV_C2C_PMU_EXT_ATTR(_name, nv_c2c_pmu_cpumask_show,\t\\\n+\t\t\t\t(unsigned long)_config)\n+\n+static struct attribute *nv_c2c_pmu_cpumask_attrs[] = {\n+\tNV_C2C_PMU_CPUMASK_ATTR(cpumask, C2C_ACTIVE_CPU_MASK),\n+\tNV_C2C_PMU_CPUMASK_ATTR(associated_cpus, C2C_ASSOCIATED_CPU_MASK),\n+\tNULL,\n+};\n+\n+static const struct attribute_group nv_c2c_pmu_cpumask_attr_group = {\n+\t.attrs = nv_c2c_pmu_cpumask_attrs,\n+};\n+\n+/* Per PMU device attribute groups. 
*/\n+\n+static int nv_c2c_pmu_alloc_attr_groups(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tconst struct attribute_group **attr_groups = c2c_pmu->attr_groups;\n+\n+\tattr_groups[0] = nv_c2c_pmu_alloc_format_attr_group(c2c_pmu);\n+\tattr_groups[1] = &nv_c2c_pmu_events_group;\n+\tattr_groups[2] = &nv_c2c_pmu_cpumask_attr_group;\n+\tattr_groups[3] = &nv_c2c_pmu_identifier_attr_group;\n+\tattr_groups[4] = &nv_c2c_pmu_peer_attr_group;\n+\n+\tif (!attr_groups[0])\n+\t\treturn -ENOMEM;\n+\n+\treturn 0;\n+}\n+\n+static int nv_c2c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)\n+{\n+\tstruct nv_c2c_pmu *c2c_pmu =\n+\t\thlist_entry_safe(node, struct nv_c2c_pmu, cpuhp_node);\n+\n+\tif (!cpumask_test_cpu(cpu, &c2c_pmu->associated_cpus))\n+\t\treturn 0;\n+\n+\t/* If the PMU is already managed, there is nothing to do */\n+\tif (!cpumask_empty(&c2c_pmu->active_cpu))\n+\t\treturn 0;\n+\n+\t/* Use this CPU for event counting */\n+\tcpumask_set_cpu(cpu, &c2c_pmu->active_cpu);\n+\n+\treturn 0;\n+}\n+\n+static int nv_c2c_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)\n+{\n+\tunsigned int dst;\n+\n+\tstruct nv_c2c_pmu *c2c_pmu =\n+\t\thlist_entry_safe(node, struct nv_c2c_pmu, cpuhp_node);\n+\n+\t/* Nothing to do if this CPU doesn't own the PMU */\n+\tif (!cpumask_test_and_clear_cpu(cpu, &c2c_pmu->active_cpu))\n+\t\treturn 0;\n+\n+\t/* Choose a new CPU to migrate ownership of the PMU to */\n+\tdst = cpumask_any_and_but(&c2c_pmu->associated_cpus,\n+\t\t\t\t  cpu_online_mask, cpu);\n+\tif (dst >= nr_cpu_ids)\n+\t\treturn 0;\n+\n+\t/* Use this CPU for event counting */\n+\tperf_pmu_migrate_context(&c2c_pmu->pmu, cpu, dst);\n+\tcpumask_set_cpu(dst, &c2c_pmu->active_cpu);\n+\n+\treturn 0;\n+}\n+\n+static int nv_c2c_pmu_get_cpus(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tint ret = 0, socket = c2c_pmu->socket, cpu;\n+\n+\tfor_each_possible_cpu(cpu) {\n+\t\tif (cpu_to_node(cpu) == socket)\n+\t\t\tcpumask_set_cpu(cpu, &c2c_pmu->associated_cpus);\n+\t}\n+\n+\tif 
(cpumask_empty(&c2c_pmu->associated_cpus)) {\n+\t\tdev_dbg(c2c_pmu->dev,\n+\t\t\t\"No cpu associated with C2C PMU socket-%u\\n\", socket);\n+\t\tret = -ENODEV;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static int nv_c2c_pmu_init_socket(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tconst char *uid_str;\n+\tint ret, socket;\n+\n+\tuid_str = acpi_device_uid(c2c_pmu->acpi_dev);\n+\tif (!uid_str) {\n+\t\tret = -ENODEV;\n+\t\tgoto fail;\n+\t}\n+\n+\tret = kstrtou32(uid_str, 0, &socket);\n+\tif (ret)\n+\t\tgoto fail;\n+\n+\tc2c_pmu->socket = socket;\n+\treturn 0;\n+\n+fail:\n+\tdev_err(c2c_pmu->dev, \"Failed to initialize socket\\n\");\n+\treturn ret;\n+}\n+\n+static int nv_c2c_pmu_init_id(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tconst char *name_fmt[C2C_TYPE_COUNT] = {\n+\t\t[C2C_TYPE_NVLINK] = \"nvidia_nvlink_c2c_pmu_%u\",\n+\t\t[C2C_TYPE_NVCLINK] = \"nvidia_nvclink_pmu_%u\",\n+\t\t[C2C_TYPE_NVDLINK] = \"nvidia_nvdlink_pmu_%u\",\n+\t};\n+\n+\tchar *name;\n+\tint ret;\n+\n+\tname = devm_kasprintf(c2c_pmu->dev, GFP_KERNEL,\n+\t\tname_fmt[c2c_pmu->c2c_type], c2c_pmu->socket);\n+\tif (!name) {\n+\t\tret = -ENOMEM;\n+\t\tgoto fail;\n+\t}\n+\n+\tc2c_pmu->name = name;\n+\n+\tc2c_pmu->identifier = acpi_device_hid(c2c_pmu->acpi_dev);\n+\n+\treturn 0;\n+\n+fail:\n+\tdev_err(c2c_pmu->dev, \"Failed to initialize name\\n\");\n+\treturn ret;\n+}\n+\n+static int nv_c2c_pmu_init_filter(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tu32 cpu_en = 0;\n+\tstruct device *dev = c2c_pmu->dev;\n+\n+\tif (c2c_pmu->c2c_type == C2C_TYPE_NVDLINK) {\n+\t\tc2c_pmu->peer_type = C2C_PEER_TYPE_CXLMEM;\n+\n+\t\tc2c_pmu->nr_inst = C2C_NR_INST_NVDLINK;\n+\t\tc2c_pmu->peer_insts[0][0] = (1UL << c2c_pmu->nr_inst) - 1;\n+\n+\t\tc2c_pmu->nr_peer = C2C_NR_PEER_CXLMEM;\n+\t\tc2c_pmu->filter_default = (1 << c2c_pmu->nr_peer) - 1;\n+\n+\t\tc2c_pmu->formats = nv_c2c_pmu_formats;\n+\n+\t\treturn 0;\n+\t}\n+\n+\tc2c_pmu->nr_inst = (c2c_pmu->c2c_type == C2C_TYPE_NVLINK) ?\n+\t\tC2C_NR_INST_NVLINK : C2C_NR_INST_NVCLINK;\n+\n+\tif 
(device_property_read_u32(dev, \"cpu_en_mask\", &cpu_en))\n+\t\tdev_dbg(dev, \"no cpu_en_mask property\\n\");\n+\n+\tif (cpu_en) {\n+\t\tc2c_pmu->peer_type = C2C_PEER_TYPE_CPU;\n+\n+\t\t/* Fill peer_insts bitmap with instances connected to peer CPU. */\n+\t\tbitmap_from_arr32(c2c_pmu->peer_insts[0], &cpu_en,\n+\t\t\t\tc2c_pmu->nr_inst);\n+\n+\t\tc2c_pmu->nr_peer = 1;\n+\t\tc2c_pmu->formats = nv_c2c_pmu_formats;\n+\t} else {\n+\t\tu32 i;\n+\t\tconst char *props[C2C_NR_PEER_MAX] = {\n+\t\t\t\"gpu0_en_mask\", \"gpu1_en_mask\"\n+\t\t};\n+\n+\t\tfor (i = 0; i < C2C_NR_PEER_MAX; i++) {\n+\t\t\tu32 gpu_en = 0;\n+\n+\t\t\tif (device_property_read_u32(dev, props[i], &gpu_en))\n+\t\t\t\tdev_dbg(dev, \"no %s property\\n\", props[i]);\n+\n+\t\t\tif (gpu_en) {\n+\t\t\t\t/* Fill peer_insts bitmap with instances connected to peer GPU. */\n+\t\t\t\tbitmap_from_arr32(c2c_pmu->peer_insts[i], &gpu_en,\n+\t\t\t\t\t\tc2c_pmu->nr_inst);\n+\n+\t\t\t\tc2c_pmu->nr_peer++;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (c2c_pmu->nr_peer == 0) {\n+\t\t\tdev_err(dev, \"No GPU is enabled\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tc2c_pmu->peer_type = C2C_PEER_TYPE_GPU;\n+\t\tc2c_pmu->formats = nv_c2c_nvlink_pmu_formats;\n+\t}\n+\n+\tc2c_pmu->filter_default = (1 << c2c_pmu->nr_peer) - 1;\n+\n+\treturn 0;\n+}\n+\n+static void *nv_c2c_pmu_init_pmu(struct platform_device *pdev)\n+{\n+\tint ret;\n+\tstruct nv_c2c_pmu *c2c_pmu;\n+\tstruct acpi_device *acpi_dev;\n+\tstruct device *dev = &pdev->dev;\n+\n+\tacpi_dev = ACPI_COMPANION(dev);\n+\tif (!acpi_dev)\n+\t\treturn ERR_PTR(-ENODEV);\n+\n+\tc2c_pmu = devm_kzalloc(dev, sizeof(*c2c_pmu), GFP_KERNEL);\n+\tif (!c2c_pmu)\n+\t\treturn ERR_PTR(-ENOMEM);\n+\n+\tc2c_pmu->dev = dev;\n+\tc2c_pmu->acpi_dev = acpi_dev;\n+\tc2c_pmu->c2c_type = (unsigned int)(unsigned long)device_get_match_data(dev);\n+\tplatform_set_drvdata(pdev, c2c_pmu);\n+\n+\tret = nv_c2c_pmu_init_socket(c2c_pmu);\n+\tif (ret)\n+\t\tgoto done;\n+\n+\tret = nv_c2c_pmu_init_id(c2c_pmu);\n+\tif 
(ret)\n+\t\tgoto done;\n+\n+\tret = nv_c2c_pmu_init_filter(c2c_pmu);\n+\tif (ret)\n+\t\tgoto done;\n+\n+done:\n+\tif (ret)\n+\t\treturn ERR_PTR(ret);\n+\n+\treturn c2c_pmu;\n+}\n+\n+static int nv_c2c_pmu_init_mmio(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tint i;\n+\tstruct device *dev = c2c_pmu->dev;\n+\tstruct platform_device *pdev = to_platform_device(dev);\n+\n+\t/* Map the address of all the instances. */\n+\tfor (i = 0; i < c2c_pmu->nr_inst; i++) {\n+\t\tc2c_pmu->base[i] = devm_platform_ioremap_resource(pdev, i);\n+\t\tif (IS_ERR(c2c_pmu->base[i])) {\n+\t\t\tdev_err(dev, \"Failed map address for instance %d\\n\", i);\n+\t\t\treturn PTR_ERR(c2c_pmu->base[i]);\n+\t\t}\n+\t}\n+\n+\t/* Map broadcast address. */\n+\tc2c_pmu->base_broadcast = devm_platform_ioremap_resource(pdev,\n+\t\t\t\t\t\t\t\t c2c_pmu->nr_inst);\n+\tif (IS_ERR(c2c_pmu->base_broadcast)) {\n+\t\tdev_err(dev, \"Failed map broadcast address\\n\");\n+\t\treturn PTR_ERR(c2c_pmu->base_broadcast);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int nv_c2c_pmu_register_pmu(struct nv_c2c_pmu *c2c_pmu)\n+{\n+\tint ret;\n+\n+\tret = cpuhp_state_add_instance(nv_c2c_pmu_cpuhp_state,\n+\t\t\t\t       &c2c_pmu->cpuhp_node);\n+\tif (ret) {\n+\t\tdev_err(c2c_pmu->dev, \"Error %d registering hotplug\\n\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tc2c_pmu->pmu = (struct pmu) {\n+\t\t.parent\t\t= c2c_pmu->dev,\n+\t\t.task_ctx_nr\t= perf_invalid_context,\n+\t\t.pmu_enable\t= nv_c2c_pmu_enable,\n+\t\t.pmu_disable\t= nv_c2c_pmu_disable,\n+\t\t.event_init\t= nv_c2c_pmu_event_init,\n+\t\t.add\t\t= nv_c2c_pmu_add,\n+\t\t.del\t\t= nv_c2c_pmu_del,\n+\t\t.start\t\t= nv_c2c_pmu_start,\n+\t\t.stop\t\t= nv_c2c_pmu_stop,\n+\t\t.read\t\t= nv_c2c_pmu_read,\n+\t\t.attr_groups\t= c2c_pmu->attr_groups,\n+\t\t.capabilities\t= PERF_PMU_CAP_NO_EXCLUDE |\n+\t\t\t\t\tPERF_PMU_CAP_NO_INTERRUPT,\n+\t};\n+\n+\tret = perf_pmu_register(&c2c_pmu->pmu, c2c_pmu->name, -1);\n+\tif (ret) {\n+\t\tdev_err(c2c_pmu->dev, \"Failed to register C2C PMU: %d\\n\", 
ret);\n+\t\tcpuhp_state_remove_instance(nv_c2c_pmu_cpuhp_state,\n+\t\t\t\t\t  &c2c_pmu->cpuhp_node);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int nv_c2c_pmu_probe(struct platform_device *pdev)\n+{\n+\tint ret;\n+\tstruct nv_c2c_pmu *c2c_pmu;\n+\n+\tc2c_pmu = nv_c2c_pmu_init_pmu(pdev);\n+\tif (IS_ERR(c2c_pmu))\n+\t\treturn PTR_ERR(c2c_pmu);\n+\n+\tret = nv_c2c_pmu_init_mmio(c2c_pmu);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = nv_c2c_pmu_get_cpus(c2c_pmu);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = nv_c2c_pmu_alloc_attr_groups(c2c_pmu);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = nv_c2c_pmu_register_pmu(c2c_pmu);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tdev_dbg(c2c_pmu->dev, \"Registered %s PMU\\n\", c2c_pmu->name);\n+\n+\treturn 0;\n+}\n+\n+static void nv_c2c_pmu_device_remove(struct platform_device *pdev)\n+{\n+\tstruct nv_c2c_pmu *c2c_pmu = platform_get_drvdata(pdev);\n+\n+\tperf_pmu_unregister(&c2c_pmu->pmu);\n+\tcpuhp_state_remove_instance(nv_c2c_pmu_cpuhp_state, &c2c_pmu->cpuhp_node);\n+}\n+\n+static const struct acpi_device_id nv_c2c_pmu_acpi_match[] = {\n+\t{ \"NVDA2023\", (kernel_ulong_t)C2C_TYPE_NVLINK },\n+\t{ \"NVDA2022\", (kernel_ulong_t)C2C_TYPE_NVCLINK },\n+\t{ \"NVDA2020\", (kernel_ulong_t)C2C_TYPE_NVDLINK },\n+\t{ }\n+};\n+MODULE_DEVICE_TABLE(acpi, nv_c2c_pmu_acpi_match);\n+\n+static struct platform_driver nv_c2c_pmu_driver = {\n+\t.driver = {\n+\t\t.name = \"nvidia-t410-c2c-pmu\",\n+\t\t.acpi_match_table = ACPI_PTR(nv_c2c_pmu_acpi_match),\n+\t\t.suppress_bind_attrs = true,\n+\t},\n+\t.probe = nv_c2c_pmu_probe,\n+\t.remove = nv_c2c_pmu_device_remove,\n+};\n+\n+static int __init nv_c2c_pmu_init(void)\n+{\n+\tint ret;\n+\n+\tret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,\n+\t\t\t\t      \"perf/nvidia/c2c:online\",\n+\t\t\t\t      nv_c2c_pmu_online_cpu,\n+\t\t\t\t      nv_c2c_pmu_cpu_teardown);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tnv_c2c_pmu_cpuhp_state = ret;\n+\treturn 
platform_driver_register(&nv_c2c_pmu_driver);\n+}\n+\n+static void __exit nv_c2c_pmu_exit(void)\n+{\n+\tplatform_driver_unregister(&nv_c2c_pmu_driver);\n+\tcpuhp_remove_multi_state(nv_c2c_pmu_cpuhp_state);\n+}\n+\n+module_init(nv_c2c_pmu_init);\n+module_exit(nv_c2c_pmu_exit);\n+\n+MODULE_LICENSE(\"GPL\");\n+MODULE_DESCRIPTION(\"NVIDIA Tegra410 C2C PMU driver\");\n+MODULE_AUTHOR(\"Besar Wicaksono <bwicaksono@nvidia.com>\");\n",
    "prefixes": [
        "v2",
        "7/8"
    ]
}