Patch Detail
get:
Show a patch.
patch:
Partially update a patch.
put:
Update a patch.
GET /api/1.2/patches/833363/?format=api
{ "id": 833363, "url": "http://patchwork.ozlabs.org/api/1.2/patches/833363/?format=api", "web_url": "http://patchwork.ozlabs.org/project/ubuntu-kernel/patch/20171102140501.87671-5-gwalbon@linux.vnet.ibm.com/", "project": { "id": 15, "url": "http://patchwork.ozlabs.org/api/1.2/projects/15/?format=api", "name": "Ubuntu Kernel", "link_name": "ubuntu-kernel", "list_id": "kernel-team.lists.ubuntu.com", "list_email": "kernel-team@lists.ubuntu.com", "web_url": null, "scm_url": null, "webscm_url": null, "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20171102140501.87671-5-gwalbon@linux.vnet.ibm.com>", "list_archive_url": null, "date": "2017-11-02T14:04:53", "name": "[Artful,04/12] powerpc/perf: Add core IMC PMU support", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "d20a3d9340326a8dbcd31c71c549b14064d7af6c", "submitter": { "id": 71662, "url": "http://patchwork.ozlabs.org/api/1.2/people/71662/?format=api", "name": "Gustavo Walbon", "email": "gwalbon@linux.vnet.ibm.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/ubuntu-kernel/patch/20171102140501.87671-5-gwalbon@linux.vnet.ibm.com/mbox/", "series": [ { "id": 11509, "url": "http://patchwork.ozlabs.org/api/1.2/series/11509/?format=api", "web_url": "http://patchwork.ozlabs.org/project/ubuntu-kernel/list/?series=11509", "date": "2017-11-02T14:04:50", "name": "Backport for Power9 Nest PMU Instrumentation", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/11509/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/833363/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/833363/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<kernel-team-bounces@lists.ubuntu.com>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org", "Authentication-Results": "ozlabs.org;\n\tspf=none (mailfrom) 
smtp.mailfrom=lists.ubuntu.com\n\t(client-ip=91.189.94.19; helo=huckleberry.canonical.com;\n\tenvelope-from=kernel-team-bounces@lists.ubuntu.com;\n\treceiver=<UNKNOWN>)", "Received": [ "from huckleberry.canonical.com (huckleberry.canonical.com\n\t[91.189.94.19])\n\tby ozlabs.org (Postfix) with ESMTP id 3ySRgS1BdQz9t3H;\n\tFri, 3 Nov 2017 01:05:24 +1100 (AEDT)", "from localhost ([127.0.0.1] helo=huckleberry.canonical.com)\n\tby huckleberry.canonical.com with esmtp (Exim 4.86_2)\n\t(envelope-from <kernel-team-bounces@lists.ubuntu.com>)\n\tid 1eAG7T-0002At-Pa; Thu, 02 Nov 2017 14:05:19 +0000", "from mx0b-001b2d01.pphosted.com ([148.163.158.5]\n\thelo=mx0a-001b2d01.pphosted.com)\n\tby huckleberry.canonical.com with esmtps\n\t(TLS1.2:ECDHE_RSA_AES_128_GCM_SHA256:128) (Exim 4.86_2)\n\t(envelope-from <gwalbon@linux.vnet.ibm.com>) id 1eAG7Q-00028r-3U\n\tfor kernel-team@lists.ubuntu.com; Thu, 02 Nov 2017 14:05:16 +0000", "from pps.filterd (m0098420.ppops.net [127.0.0.1])\n\tby mx0b-001b2d01.pphosted.com (8.16.0.21/8.16.0.21) with SMTP id\n\tvA2E4UBr103140\n\tfor <kernel-team@lists.ubuntu.com>; Thu, 2 Nov 2017 10:05:15 -0400", "from e12.ny.us.ibm.com (e12.ny.us.ibm.com [129.33.205.202])\n\tby mx0b-001b2d01.pphosted.com with ESMTP id 2e0264jy56-1\n\t(version=TLSv1.2 cipher=AES256-SHA bits=256 verify=NOT)\n\tfor <kernel-team@lists.ubuntu.com>; Thu, 02 Nov 2017 10:05:14 -0400", "from localhost\n\tby e12.ny.us.ibm.com with IBM ESMTP SMTP Gateway: Authorized Use\n\tOnly! Violators will be prosecuted\n\tfor <kernel-team@lists.ubuntu.com> from <gwalbon@linux.vnet.ibm.com>; \n\tThu, 2 Nov 2017 10:05:14 -0400", "from b01cxnp23033.gho.pok.ibm.com (9.57.198.28)\n\tby e12.ny.us.ibm.com (146.89.104.199) with IBM ESMTP SMTP Gateway:\n\tAuthorized Use Only! 
Violators will be prosecuted; \n\tThu, 2 Nov 2017 10:05:12 -0400", "from b01ledav005.gho.pok.ibm.com (b01ledav005.gho.pok.ibm.com\n\t[9.57.199.110])\n\tby b01cxnp23033.gho.pok.ibm.com (8.14.9/8.14.9/NCO v10.0) with ESMTP\n\tid vA2E5CL355771184; Thu, 2 Nov 2017 14:05:12 GMT", "from b01ledav005.gho.pok.ibm.com (unknown [127.0.0.1])\n\tby IMSVA (Postfix) with ESMTP id A9805AE04B;\n\tThu, 2 Nov 2017 10:05:58 -0400 (EDT)", "from localhost (unknown [9.85.138.60])\n\tby b01ledav005.gho.pok.ibm.com (Postfix) with ESMTP id D627BAE03B;\n\tThu, 2 Nov 2017 10:05:57 -0400 (EDT)" ], "From": "Gustavo Walbon <gwalbon@linux.vnet.ibm.com>", "To": "kernel-team@lists.ubuntu.com", "Subject": "[Artful][PATCH 04/12] powerpc/perf: Add core IMC PMU support", "Date": "Thu, 2 Nov 2017 12:04:53 -0200", "X-Mailer": "git-send-email 2.13.3", "In-Reply-To": "<20171102140501.87671-1-gwalbon@linux.vnet.ibm.com>", "References": "<20171102140501.87671-1-gwalbon@linux.vnet.ibm.com>", "X-TM-AS-GCONF": "00", "x-cbid": "17110214-0048-0000-0000-000001FF6930", "X-IBM-SpamModules-Scores": "", "X-IBM-SpamModules-Versions": "BY=3.00007997; HX=3.00000241; KW=3.00000007;\n\tPH=3.00000004; SC=3.00000239; SDB=6.00940090; UDB=6.00474019;\n\tIPR=6.00720346; \n\tBA=6.00005666; NDR=6.00000001; ZLA=6.00000005; ZF=6.00000009;\n\tZB=6.00000000; \n\tZP=6.00000000; ZH=6.00000000; ZU=6.00000002; MB=3.00017836;\n\tXFM=3.00000015; UTC=2017-11-02 14:05:13", "X-IBM-AV-DETECTION": "SAVI=unused REMOTE=unused XFE=unused", "x-cbparentid": "17110214-0049-0000-0000-0000430E79E9", "Message-Id": "<20171102140501.87671-5-gwalbon@linux.vnet.ibm.com>", "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10432:, ,\n\tdefinitions=2017-11-02_05:, , signatures=0", "X-Proofpoint-Spam-Details": "rule=outbound_notspam policy=outbound score=0\n\tpriorityscore=1501\n\tmalwarescore=0 suspectscore=3 phishscore=0 bulkscore=0 spamscore=0\n\tclxscore=1015 lowpriorityscore=0 impostorscore=0 adultscore=0\n\tclassifier=spam adjust=0 reason=mlx 
scancount=1\n\tengine=8.0.1-1707230000\n\tdefinitions=main-1711020178", "X-BeenThere": "kernel-team@lists.ubuntu.com", "X-Mailman-Version": "2.1.20", "Precedence": "list", "List-Id": "Kernel team discussions <kernel-team.lists.ubuntu.com>", "List-Unsubscribe": "<https://lists.ubuntu.com/mailman/options/kernel-team>,\n\t<mailto:kernel-team-request@lists.ubuntu.com?subject=unsubscribe>", "List-Archive": "<https://lists.ubuntu.com/archives/kernel-team>", "List-Post": "<mailto:kernel-team@lists.ubuntu.com>", "List-Help": "<mailto:kernel-team-request@lists.ubuntu.com?subject=help>", "List-Subscribe": "<https://lists.ubuntu.com/mailman/listinfo/kernel-team>,\n\t<mailto:kernel-team-request@lists.ubuntu.com?subject=subscribe>", "MIME-Version": "1.0", "Content-Type": "text/plain; charset=\"utf-8\"", "Content-Transfer-Encoding": "base64", "Errors-To": "kernel-team-bounces@lists.ubuntu.com", "Sender": "\"kernel-team\" <kernel-team-bounces@lists.ubuntu.com>" }, "content": "From: Anju T Sudhakar <anju@linux.vnet.ibm.com>\n\nBugLink: https://bugs.launchpad.net/bugs/1481347\n\nAdd support to register Core In-Memory Collection PMU counters.\nPatch adds core IMC specific data structures, along with memory\ninit functions and CPU hotplug support.\n\nSigned-off-by: Anju T Sudhakar <anju@linux.vnet.ibm.com>\nSigned-off-by: Hemant Kumar <hemant@linux.vnet.ibm.com>\nSigned-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>\nSigned-off-by: Michael Ellerman <mpe@ellerman.id.au>\n(cherry-picked from 39a846db1d574a498511ffccd75223a35cdcb059)\nSigned-off-by: Gustavo Walbon <gwalbon@linux.vnet.ibm.com>\n---\n arch/powerpc/perf/imc-pmu.c | 303 +++++++++++++++++++++++++++++++++++++++++++-\n include/linux/cpuhotplug.h | 1 +\n 2 files changed, 300 insertions(+), 4 deletions(-)", "diff": "diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c\nindex 4543faa1bb0d..482f8d6d5e65 100644\n--- a/arch/powerpc/perf/imc-pmu.c\n+++ b/arch/powerpc/perf/imc-pmu.c\n@@ -31,6 +31,12 @@ static 
cpumask_t nest_imc_cpumask;\n struct imc_pmu_ref *nest_imc_refc;\n static int nest_pmus;\n \n+/* Core IMC data structures and variables */\n+\n+static cpumask_t core_imc_cpumask;\n+struct imc_pmu_ref *core_imc_refc;\n+static struct imc_pmu *core_imc_pmu;\n+\n struct imc_pmu *imc_event_to_pmu(struct perf_event *event)\n {\n \treturn container_of(event->pmu, struct imc_pmu, pmu);\n@@ -62,11 +68,13 @@ static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,\n \tstruct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);\n \tcpumask_t *active_mask;\n \n-\t/* Subsequenct patch will add more pmu types here */\n \tswitch(imc_pmu->domain){\n \tcase IMC_DOMAIN_NEST:\n \t\tactive_mask = &nest_imc_cpumask;\n \t\tbreak;\n+\tcase IMC_DOMAIN_CORE:\n+\t\tactive_mask = &core_imc_cpumask;\n+\t\tbreak;\n \tdefault:\n \t\treturn 0;\n \t}\n@@ -486,6 +494,240 @@ static int nest_imc_event_init(struct perf_event *event)\n \treturn 0;\n }\n \n+/*\n+ * core_imc_mem_init : Initializes memory for the current core.\n+ *\n+ * Uses alloc_pages_node() and uses the returned address as an argument to\n+ * an opal call to configure the pdbar. The address sent as an argument is\n+ * converted to physical address before the opal call is made. 
This is the\n+ * base address at which the core imc counters are populated.\n+ */\n+static int core_imc_mem_init(int cpu, int size)\n+{\n+\tint phys_id, rc = 0, core_id = (cpu / threads_per_core);\n+\tstruct imc_mem_info *mem_info;\n+\n+\t/*\n+\t * alloc_pages_node() will allocate memory for core in the\n+\t * local node only.\n+\t */\n+\tphys_id = topology_physical_package_id(cpu);\n+\tmem_info = &core_imc_pmu->mem_info[core_id];\n+\tmem_info->id = core_id;\n+\n+\t/* We need only vbase for core counters */\n+\tmem_info->vbase = page_address(alloc_pages_node(phys_id,\n+\t\t\t\t\t GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,\n+\t\t\t\t\t get_order(size)));\n+\tif (!mem_info->vbase)\n+\t\treturn -ENOMEM;\n+\n+\t/* Init the mutex */\n+\tcore_imc_refc[core_id].id = core_id;\n+\tmutex_init(&core_imc_refc[core_id].lock);\n+\n+\trc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,\n+\t\t\t\t__pa((void *)mem_info->vbase),\n+\t\t\t\tget_hard_smp_processor_id(cpu));\n+\tif (rc) {\n+\t\tfree_pages((u64)mem_info->vbase, get_order(size));\n+\t\tmem_info->vbase = NULL;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static bool is_core_imc_mem_inited(int cpu)\n+{\n+\tstruct imc_mem_info *mem_info;\n+\tint core_id = (cpu / threads_per_core);\n+\n+\tmem_info = &core_imc_pmu->mem_info[core_id];\n+\tif (!mem_info->vbase)\n+\t\treturn false;\n+\n+\treturn true;\n+}\n+\n+static int ppc_core_imc_cpu_online(unsigned int cpu)\n+{\n+\tconst struct cpumask *l_cpumask;\n+\tstatic struct cpumask tmp_mask;\n+\tint ret = 0;\n+\n+\t/* Get the cpumask for this core */\n+\tl_cpumask = cpu_sibling_mask(cpu);\n+\n+\t/* If a cpu for this core is already set, then, don't do anything */\n+\tif (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))\n+\t\treturn 0;\n+\n+\tif (!is_core_imc_mem_inited(cpu)) {\n+\t\tret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);\n+\t\tif (ret) {\n+\t\t\tpr_info(\"core_imc memory allocation for cpu %d failed\\n\", cpu);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\t/* set the cpu in 
the mask */\n+\tcpumask_set_cpu(cpu, &core_imc_cpumask);\n+\treturn 0;\n+}\n+\n+static int ppc_core_imc_cpu_offline(unsigned int cpu)\n+{\n+\tunsigned int ncpu, core_id;\n+\tstruct imc_pmu_ref *ref;\n+\n+\t/*\n+\t * clear this cpu out of the mask, if not present in the mask,\n+\t * don't bother doing anything.\n+\t */\n+\tif (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))\n+\t\treturn 0;\n+\n+\t/* Find any online cpu in that core except the current \"cpu\" */\n+\tncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);\n+\n+\tif (ncpu >= 0 && ncpu < nr_cpu_ids) {\n+\t\tcpumask_set_cpu(ncpu, &core_imc_cpumask);\n+\t\tperf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);\n+\t} else {\n+\t\t/*\n+\t\t * If this is the last cpu in this core then, skip taking refernce\n+\t\t * count mutex lock for this core and directly zero \"refc\" for\n+\t\t * this core.\n+\t\t */\n+\t\topal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,\n+\t\t\t\t get_hard_smp_processor_id(cpu));\n+\t\tcore_id = cpu / threads_per_core;\n+\t\tref = &core_imc_refc[core_id];\n+\t\tif (!ref)\n+\t\t\treturn -EINVAL;\n+\n+\t\tref->refc = 0;\n+\t}\n+\treturn 0;\n+}\n+\n+static int core_imc_pmu_cpumask_init(void)\n+{\n+\treturn cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,\n+\t\t\t\t \"perf/powerpc/imc_core:online\",\n+\t\t\t\t ppc_core_imc_cpu_online,\n+\t\t\t\t ppc_core_imc_cpu_offline);\n+}\n+\n+static void core_imc_counters_release(struct perf_event *event)\n+{\n+\tint rc, core_id;\n+\tstruct imc_pmu_ref *ref;\n+\n+\tif (event->cpu < 0)\n+\t\treturn;\n+\t/*\n+\t * See if we need to disable the IMC PMU.\n+\t * If no events are currently in use, then we have to take a\n+\t * mutex to ensure that we don't race with another task doing\n+\t * enable or disable the core counters.\n+\t */\n+\tcore_id = event->cpu / threads_per_core;\n+\n+\t/* Take the mutex lock and decrement the refernce count for this core */\n+\tref = &core_imc_refc[core_id];\n+\tif 
(!ref)\n+\t\treturn;\n+\n+\tmutex_lock(&ref->lock);\n+\tref->refc--;\n+\tif (ref->refc == 0) {\n+\t\trc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,\n+\t\t\t\t\t get_hard_smp_processor_id(event->cpu));\n+\t\tif (rc) {\n+\t\t\tmutex_unlock(&ref->lock);\n+\t\t\tpr_err(\"IMC: Unable to stop the counters for core %d\\n\", core_id);\n+\t\t\treturn;\n+\t\t}\n+\t} else if (ref->refc < 0) {\n+\t\tWARN(1, \"core-imc: Invalid event reference count\\n\");\n+\t\tref->refc = 0;\n+\t}\n+\tmutex_unlock(&ref->lock);\n+}\n+\n+static int core_imc_event_init(struct perf_event *event)\n+{\n+\tint core_id, rc;\n+\tu64 config = event->attr.config;\n+\tstruct imc_mem_info *pcmi;\n+\tstruct imc_pmu *pmu;\n+\tstruct imc_pmu_ref *ref;\n+\n+\tif (event->attr.type != event->pmu->type)\n+\t\treturn -ENOENT;\n+\n+\t/* Sampling not supported */\n+\tif (event->hw.sample_period)\n+\t\treturn -EINVAL;\n+\n+\t/* unsupported modes and filters */\n+\tif (event->attr.exclude_user ||\n+\t event->attr.exclude_kernel ||\n+\t event->attr.exclude_hv ||\n+\t event->attr.exclude_idle ||\n+\t event->attr.exclude_host ||\n+\t event->attr.exclude_guest)\n+\t\treturn -EINVAL;\n+\n+\tif (event->cpu < 0)\n+\t\treturn -EINVAL;\n+\n+\tevent->hw.idx = -1;\n+\tpmu = imc_event_to_pmu(event);\n+\n+\t/* Sanity check for config (event offset) */\n+\tif (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))\n+\t\treturn -EINVAL;\n+\n+\tif (!is_core_imc_mem_inited(event->cpu))\n+\t\treturn -ENODEV;\n+\n+\tcore_id = event->cpu / threads_per_core;\n+\tpcmi = &core_imc_pmu->mem_info[core_id];\n+\tif ((!pcmi->vbase))\n+\t\treturn -ENODEV;\n+\n+\t/* Get the core_imc mutex for this core */\n+\tref = &core_imc_refc[core_id];\n+\tif (!ref)\n+\t\treturn -EINVAL;\n+\n+\t/*\n+\t * Core pmu units are enabled only when it is used.\n+\t * See if this is triggered for the first time.\n+\t * If yes, take the mutex lock and enable the core counters.\n+\t * If not, just increment the count in core_imc_refc struct.\n+\t 
*/\n+\tmutex_lock(&ref->lock);\n+\tif (ref->refc == 0) {\n+\t\trc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,\n+\t\t\t\t\t get_hard_smp_processor_id(event->cpu));\n+\t\tif (rc) {\n+\t\t\tmutex_unlock(&ref->lock);\n+\t\t\tpr_err(\"core-imc: Unable to start the counters for core %d\\n\",\n+\t\t\t\t\t\t\t\t\tcore_id);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\t++ref->refc;\n+\tmutex_unlock(&ref->lock);\n+\n+\tevent->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);\n+\tevent->destroy = core_imc_counters_release;\n+\treturn 0;\n+}\n+\n static u64 * get_event_base_addr(struct perf_event *event)\n {\n \t/*\n@@ -564,12 +806,15 @@ static int update_pmu_ops(struct imc_pmu *pmu)\n \tpmu->pmu.attr_groups = pmu->attr_groups;\n \tpmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;\n \n-\t/* Subsequenct patch will add more pmu types here */\n \tswitch (pmu->domain) {\n \tcase IMC_DOMAIN_NEST:\n \t\tpmu->pmu.event_init = nest_imc_event_init;\n \t\tpmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;\n \t\tbreak;\n+\tcase IMC_DOMAIN_CORE:\n+\t\tpmu->pmu.event_init = core_imc_event_init;\n+\t\tpmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;\n+\t\tbreak;\n \tdefault:\n \t\tbreak;\n \t}\n@@ -621,6 +866,22 @@ static int init_nest_pmu_ref(void)\n \treturn 0;\n }\n \n+static void cleanup_all_core_imc_memory(void)\n+{\n+\tint i, nr_cores = num_present_cpus() / threads_per_core;\n+\tstruct imc_mem_info *ptr = core_imc_pmu->mem_info;\n+\tint size = core_imc_pmu->counter_mem_size;\n+\n+\t/* mem_info will never be NULL */\n+\tfor (i = 0; i < nr_cores; i++) {\n+\t\tif (ptr[i].vbase)\n+\t\t\tfree_pages((u64)ptr->vbase, get_order(size));\n+\t}\n+\n+\tkfree(ptr);\n+\tkfree(core_imc_refc);\n+}\n+\n /*\n * Common function to unregister cpu hotplug callback and\n * free the memory.\n@@ -641,6 +902,12 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)\n \t\tmutex_unlock(&nest_init_lock);\n \t}\n \n+\t/* Free core_imc memory 
*/\n+\tif (pmu_ptr->domain == IMC_DOMAIN_CORE) {\n+\t\tcpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);\n+\t\tcleanup_all_core_imc_memory();\n+\t}\n+\n \t/* Only free the attr_groups which are dynamically allocated */\n \tkfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);\n \tkfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);\n@@ -656,11 +923,11 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,\n \t\t\t\t\t\t\t\tint pmu_index)\n {\n \tconst char *s;\n+\tint nr_cores;\n \n \tif (of_property_read_string(parent, \"name\", &s))\n \t\treturn -ENODEV;\n \n-\t/* Subsequenct patch will add more pmu types here */\n \tswitch (pmu_ptr->domain) {\n \tcase IMC_DOMAIN_NEST:\n \t\t/* Update the pmu name */\n@@ -671,6 +938,27 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,\n \t\t/* Needed for hotplug/migration */\n \t\tper_nest_pmu_arr[pmu_index] = pmu_ptr;\n \t\tbreak;\n+\tcase IMC_DOMAIN_CORE:\n+\t\t/* Update the pmu name */\n+\t\tpmu_ptr->pmu.name = kasprintf(GFP_KERNEL, \"%s%s\", s, \"_imc\");\n+\t\tif (!pmu_ptr->pmu.name)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tnr_cores = num_present_cpus() / threads_per_core;\n+\t\tpmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),\n+\t\t\t\t\t\t\t\tGFP_KERNEL);\n+\n+\t\tif (!pmu_ptr->mem_info)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tcore_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),\n+\t\t\t\t\t\t\t\tGFP_KERNEL);\n+\n+\t\tif (!core_imc_refc)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tcore_imc_pmu = pmu_ptr;\n+\t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n \t}\n@@ -696,7 +984,6 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id\n \tif (ret)\n \t\tgoto err_free;\n \n-\t/* Subsequenct patch will add more pmu types here */\n \tswitch (pmu_ptr->domain) {\n \tcase IMC_DOMAIN_NEST:\n \t\t/*\n@@ -722,6 +1009,14 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id\n \t\tnest_pmus++;\n 
\t\tmutex_unlock(&nest_init_lock);\n \t\tbreak;\n+\tcase IMC_DOMAIN_CORE:\n+\t\tret = core_imc_pmu_cpumask_init();\n+\t\tif (ret) {\n+\t\t\tcleanup_all_core_imc_memory();\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tbreak;\n \tdefault:\n \t\treturn -1;\t/* Unknown domain */\n \t}\ndiff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h\nindex 05d1907713e9..47eb6a1f2db9 100644\n--- a/include/linux/cpuhotplug.h\n+++ b/include/linux/cpuhotplug.h\n@@ -138,6 +138,7 @@ enum cpuhp_state {\n \tCPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,\n \tCPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,\n \tCPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,\n+\tCPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,\n \tCPUHP_AP_WORKQUEUE_ONLINE,\n \tCPUHP_AP_RCUTREE_ONLINE,\n \tCPUHP_AP_ONLINE_DYN,\n", "prefixes": [ "Artful", "04/12" ] }