hw-arm64-add-vcpu-cache-info-support.patch 12.76 KB
imxcc committed on 2022-02-17 22:34 · Update patch with openeuler !232

From c5cd762bb7513b6df07e26f4eb619dccbd1918b7 Mon Sep 17 00:00:00 2001
From: Ying Fang <fangying1@huawei.com>
Date: Tue, 8 Feb 2022 11:31:15 +0800
Subject: [PATCH 23/24] hw/arm64: add vcpu cache info support

Support vCPU cache info via dtb and the ACPI PPTT table, including the
L1, L2 and L3 caches.

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Honghao <honghao5@huawei.com>
Signed-off-by: Ying Fang <fangying1@huawei.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
---
hw/acpi/aml-build.c | 158 ++++++++++++++++++++++++++++++++++++
hw/arm/virt.c | 72 ++++++++++++++++
include/hw/acpi/aml-build.h | 47 +++++++++++
tests/data/acpi/virt/PPTT | Bin 96 -> 208 bytes
4 files changed, 277 insertions(+)
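
Note: build_cache_hierarchy_node() added below emits the 24-byte ACPI 6.3
Cache Type Structure (Type 1) field by field. A minimal sketch of that layout
as a packed C struct, purely for illustration (the struct and field names are
assumptions; neither this patch nor QEMU declares such a type):

#include <stdint.h>

/* Sketch of the ACPI 6.3 "Cache Type Structure (Type 1)", section 5.2.29.2.
 * Each field corresponds to one build_append_* call in the first hunk. */
struct AcpiPpttCacheNodeSketch {
    uint8_t  type;            /* 1: cache type structure */
    uint8_t  length;          /* 24: size of this structure in bytes */
    uint8_t  reserved[2];
    uint32_t flags;           /* 127 (0x7f): all seven "field valid" bits set */
    uint32_t next_level;      /* PPTT offset of the next-level cache, 0 if none */
    uint32_t size;            /* cache size in bytes, e.g. ARM_L1DCACHE_SIZE */
    uint32_t number_of_sets;  /* e.g. ARM_L1DCACHE_SETS */
    uint8_t  associativity;   /* e.g. ARM_L1DCACHE_ASSOCIATIVITY */
    uint8_t  attributes;      /* allocation type, cache type, write policy */
    uint16_t line_size;       /* cache line size in bytes */
} __attribute__((packed));    /* 1 + 1 + 2 + 4 + 4 + 4 + 4 + 1 + 1 + 2 = 24 */
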
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index bb2cad63b5..bebf49622b 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1994,6 +1994,163 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
}
}
+#ifdef __aarch64__
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29.2 Cache Type Structure (Type 1)
+ */
+static void build_cache_hierarchy_node(GArray *tbl, uint32_t next_level,
+ uint32_t cache_type)
+{
+ build_append_byte(tbl, 1);
+ build_append_byte(tbl, 24);
+ build_append_int_noprefix(tbl, 0, 2);
+ build_append_int_noprefix(tbl, 127, 4);
+ build_append_int_noprefix(tbl, next_level, 4);
+
+ switch (cache_type) {
+ case ARM_L1D_CACHE: /* L1 dcache info */
+ build_append_int_noprefix(tbl, ARM_L1DCACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L1DCACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L1DCACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L1DCACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L1DCACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L1I_CACHE: /* L1 icache info */
+ build_append_int_noprefix(tbl, ARM_L1ICACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L1ICACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L1ICACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L1ICACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L1ICACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L2_CACHE: /* L2 cache info */
+ build_append_int_noprefix(tbl, ARM_L2CACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L2CACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L2CACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L2CACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L2CACHE_LINE_SIZE, 2);
+ break;
+ case ARM_L3_CACHE: /* L3 cache info */
+ build_append_int_noprefix(tbl, ARM_L3CACHE_SIZE, 4);
+ build_append_int_noprefix(tbl, ARM_L3CACHE_SETS, 4);
+ build_append_byte(tbl, ARM_L3CACHE_ASSOCIATIVITY);
+ build_append_byte(tbl, ARM_L3CACHE_ATTRIBUTES);
+ build_append_int_noprefix(tbl, ARM_L3CACHE_LINE_SIZE, 2);
+ break;
+ default:
+ build_append_int_noprefix(tbl, 0, 4);
+ build_append_int_noprefix(tbl, 0, 4);
+ build_append_byte(tbl, 0);
+ build_append_byte(tbl, 0);
+ build_append_int_noprefix(tbl, 0, 2);
+ }
+}
+
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29 Processor Properties Topology Table (PPTT)
+ */
+void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+ const char *oem_id, const char *oem_table_id)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ GQueue *list = g_queue_new();
+ guint pptt_start = table_data->len;
+ guint parent_offset;
+ guint length, i;
+ int uid = 0;
+ int socket;
+ AcpiTable table = { .sig = "PPTT", .rev = 2,
+ .oem_id = oem_id, .oem_table_id = oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+
+ for (socket = 0; socket < ms->smp.sockets; socket++) {
+ uint32_t l3_cache_offset = table_data->len - pptt_start;
+ build_cache_hierarchy_node(table_data, 0, ARM_L3_CACHE);
+
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ /*
+ * Physical package - represents the boundary
+ * of a physical package
+ */
+ (1 << 0),
+ 0, socket, &l3_cache_offset, 1);
+ }
+
+ if (mc->smp_props.clusters_supported) {
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int cluster;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ (0 << 0), /* not a physical package */
+ parent_offset, cluster, NULL, 0);
+ }
+ }
+ }
+
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int core;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (core = 0; core < ms->smp.cores; core++) {
+ uint32_t priv_rsrc[3] = {};
+ priv_rsrc[0] = table_data->len - pptt_start; /* L2 cache offset */
+ build_cache_hierarchy_node(table_data, 0, ARM_L2_CACHE);
+
+ priv_rsrc[1] = table_data->len - pptt_start; /* L1 dcache offset */
+ build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1D_CACHE);
+
+ priv_rsrc[2] = table_data->len - pptt_start; /* L1 icache offset */
+ build_cache_hierarchy_node(table_data, priv_rsrc[0], ARM_L1I_CACHE);
+
+ if (ms->smp.threads > 1) {
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ (0 << 0), /* not a physical package */
+ parent_offset, core, priv_rsrc, 3);
+ } else {
+ build_processor_hierarchy_node(
+ table_data,
+ (1 << 1) | /* ACPI Processor ID valid */
+ (1 << 3), /* Node is a Leaf */
+ parent_offset, uid++, priv_rsrc, 3);
+ }
+ }
+ }
+
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int thread;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (thread = 0; thread < ms->smp.threads; thread++) {
+ build_processor_hierarchy_node(
+ table_data,
+ (1 << 1) | /* ACPI Processor ID valid */
+ (1 << 2) | /* Processor is a Thread */
+ (1 << 3), /* Node is a Leaf */
+ parent_offset, uid++, NULL, 0);
+ }
+ }
+
+ g_queue_free(list);
+ acpi_table_end(linker, &table);
+}
+
+#else
/*
* ACPI spec, Revision 6.3
* 5.2.29 Processor Properties Topology Table (PPTT)
@@ -2084,6 +2241,7 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
g_queue_free(list);
acpi_table_end(linker, &table);
}
+#endif
/* build rev1/rev3/rev5.1 FADT */
void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
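
Note: the PPTT code above links each cache node to a processor node through
the private-resource offsets passed to build_processor_hierarchy_node(), a
pre-existing helper that is not shown in this diff. For orientation, a sketch
of the ACPI 6.3 Processor Hierarchy Node Structure (Type 0) that helper
produces; the struct and field names are illustrative only:

#include <stdint.h>

/* Sketch of the ACPI 6.3 "Processor Hierarchy Node Structure (Type 0)",
 * section 5.2.29.1.  Illustration only; QEMU builds this structure with
 * build_append_* calls rather than a C struct. */
struct AcpiPpttProcessorNodeSketch {
    uint8_t  type;                   /* 0: processor hierarchy node */
    uint8_t  length;                 /* 20 + 4 * num_private_resources */
    uint8_t  reserved[2];
    uint32_t flags;                  /* bit0 physical package, bit1 ACPI
                                      * Processor ID valid, bit2 thread,
                                      * bit3 leaf node */
    uint32_t parent;                 /* PPTT offset of the parent node, 0 for root */
    uint32_t acpi_processor_id;
    uint32_t num_private_resources;
    /* followed by num_private_resources 4-byte offsets, e.g. the
     * L2/L1D/L1I cache nodes collected in priv_rsrc[] above */
} __attribute__((packed));
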
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index ddcb73f714..529c0d38b6 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -350,6 +350,72 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
}
+static void fdt_add_l3cache_nodes(const VirtMachineState *vms)
+{
+ int i;
+ const MachineState *ms = MACHINE(vms);
+ int cpus_per_socket = ms->smp.clusters * ms->smp.cores * ms->smp.threads;
+ int sockets = (ms->smp.cpus + cpus_per_socket - 1) / cpus_per_socket;
+
+ for (i = 0; i < sockets; i++) {
+ char *nodename = g_strdup_printf("/cpus/l3-cache%d", i);
+
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
+ qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-level", 3);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", 0x2000000);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", 128);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", 2048);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+ qemu_fdt_alloc_phandle(ms->fdt));
+ g_free(nodename);
+ }
+}
+
+static void fdt_add_l2cache_nodes(const VirtMachineState *vms)
+{
+ const MachineState *ms = MACHINE(vms);
+ int cpus_per_socket = ms->smp.clusters * ms->smp.cores * ms->smp.threads;
+ int cpu;
+
+ for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
+ char *next_path = g_strdup_printf("/cpus/l3-cache%d",
+ cpu / cpus_per_socket);
+ char *nodename = g_strdup_printf("/cpus/l2-cache%d", cpu);
+
+ qemu_fdt_add_subnode(ms->fdt, nodename);
+ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size", 0x80000);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", 64);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", 1024);
+ qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
+ next_path);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+ qemu_fdt_alloc_phandle(ms->fdt));
+
+ g_free(next_path);
+ g_free(nodename);
+ }
+}
+
+static void fdt_add_l1cache_prop(const VirtMachineState *vms,
+ char *nodename, int cpu)
+{
+ const MachineState *ms = MACHINE(vms);
+ char *cachename = g_strdup_printf("/cpus/l2-cache%d", cpu);
+
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-size", 0x10000);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-line-size", 64);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-sets", 256);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-size", 0x10000);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-line-size", 64);
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-sets", 256);
+ qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
+ cachename);
+ g_free(cachename);
+}
+
static void fdt_add_cpu_nodes(const VirtMachineState *vms)
{
int cpu;
@@ -384,6 +450,11 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", addr_cells);
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
+ if (!vmc->no_cpu_topology) {
+ fdt_add_l3cache_nodes(vms);
+ fdt_add_l2cache_nodes(vms);
+ }
+
for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
@@ -413,6 +484,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
}
if (!vmc->no_cpu_topology) {
+ fdt_add_l1cache_prop(vms, nodename, cpu);
qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
qemu_fdt_alloc_phandle(ms->fdt));
}
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index 8346003a22..8e8ad8029e 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -221,6 +221,53 @@ struct AcpiBuildTables {
BIOSLinker *linker;
} AcpiBuildTables;
+#ifdef __aarch64__
+/* Definitions of the hardcoded cache info */
+
+typedef enum {
+ ARM_L1D_CACHE,
+ ARM_L1I_CACHE,
+ ARM_L2_CACHE,
+ ARM_L3_CACHE
+} ArmCacheType;
+
+/* L1 data cache: */
+#define ARM_L1DCACHE_SIZE 65536
+#define ARM_L1DCACHE_SETS 256
+#define ARM_L1DCACHE_ASSOCIATIVITY 4
+#define ARM_L1DCACHE_ATTRIBUTES 2
+#define ARM_L1DCACHE_LINE_SIZE 64
+
+/* L1 instruction cache: */
+#define ARM_L1ICACHE_SIZE 65536
+#define ARM_L1ICACHE_SETS 256
+#define ARM_L1ICACHE_ASSOCIATIVITY 4
+#define ARM_L1ICACHE_ATTRIBUTES 4
+#define ARM_L1ICACHE_LINE_SIZE 64
+
+/* Level 2 unified cache: */
+#define ARM_L2CACHE_SIZE 524288
+#define ARM_L2CACHE_SETS 1024
+#define ARM_L2CACHE_ASSOCIATIVITY 8
+#define ARM_L2CACHE_ATTRIBUTES 10
+#define ARM_L2CACHE_LINE_SIZE 64
+
+/* Level 3 unified cache: */
+#define ARM_L3CACHE_SIZE 33554432
+#define ARM_L3CACHE_SETS 2048
+#define ARM_L3CACHE_ASSOCIATIVITY 15
+#define ARM_L3CACHE_ATTRIBUTES 10
+#define ARM_L3CACHE_LINE_SIZE 128
+
+struct offset_status {
+ uint32_t parent;
+ uint32_t l2_offset;
+ uint32_t l1d_offset;
+ uint32_t l1i_offset;
+};
+
+#endif
+
typedef
struct CrsRangeEntry {
uint64_t base;
--
2.27.0
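
Note on the hardcoded ARM_L*CACHE_ATTRIBUTES values added in aml-build.h: per
the ACPI 6.3 Cache Type Structure, the Attributes byte packs the allocation
type (bits 1:0), cache type (bits 3:2) and write policy (bit 4). A small,
self-contained sketch of that decoding (the helper and program are
illustrative only and exist neither in QEMU nor in this patch):

#include <stdint.h>
#include <stdio.h>

/* Decode the PPTT Cache Type Structure "Attributes" byte (ACPI 6.3). */
static const char *cache_kind(uint8_t attr)
{
    switch ((attr >> 2) & 0x3) {    /* bits [3:2]: cache type */
    case 0:  return "data";
    case 1:  return "instruction";
    default: return "unified";
    }
}

int main(void)
{
    /* Values taken from the macros added in include/hw/acpi/aml-build.h */
    uint8_t attrs[] = { 2, 4, 10, 10 };   /* L1D, L1I, L2, L3 */
    const char *names[] = { "L1D", "L1I", "L2", "L3" };

    for (int i = 0; i < 4; i++) {
        printf("%s: %s cache, %s, %s-allocate\n",
               names[i],
               cache_kind(attrs[i]),
               (attrs[i] & 0x10) ? "write-through" : "write-back",
               ((attrs[i] & 0x3) == 0) ? "read" :
               ((attrs[i] & 0x3) == 1) ? "write" : "read-write");
    }
    return 0;
}

With these values, L1D decodes as a data cache, L1I as an instruction cache,
and L2/L3 as unified caches, all write-back.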