From 27e42c8321922ef76888db749f8f7d796b4a7c96 Mon Sep 17 00:00:00 2001
From: Alexey Makhalov <amakhalov@vmware.com>
Date: Mon, 24 Sep 2018 18:37:34 -0700
Subject: [PATCH] x86/vmware: Steal time accounting support

Implement the VMware steal time infrastructure. Steal time can be
enabled by adding the VM configuration option stealclock.enable =
"TRUE". It is supported by VMs that run hardware version 13 or newer.
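
For illustration (only the stealclock.enable key comes from this
patch; the surrounding workflow is just an example):

  # VM configuration (.vmx), virtual hardware version 13 or newer:
  stealclock.enable = "TRUE"

  # Inside the guest, accounted steal time then surfaces through the
  # usual interfaces, e.g. the "st" column of vmstat:
  vmstat 1
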
[tomer: use READ_ONCE macros and add 32-bit guest support]
Signed-off-by: Alexey Makhalov <amakhalov@vmware.com>
Signed-off-by: Tomer Zeltzer <tomerr90@gmail.com>
Signed-off-by: Nadav Amit <namit@vmware.com>
---
Documentation/admin-guide/kernel-parameters.txt | 4 +-
arch/x86/kernel/cpu/vmware.c | 215 +++++++++++++++++++++++-
2 files changed, 212 insertions(+), 7 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1370b42..ff55d00 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2855,8 +2855,8 @@
[X86,PV_OPS] Disable paravirtualized VMware scheduler
clock and use the default one.
- no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting.
- steal time is computed, but won't influence scheduler
+ no-steal-acc [X86,PV_OPS] Disable paravirtualized steal time accounting.
+ Steal time is computed, but won't influence scheduler
behaviour
nolapic [X86-32,APIC] Do not enable or use the local APIC.
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index eb8515b..3aac91c 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -25,6 +25,8 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
+#include <linux/cpu.h>
+#include <linux/reboot.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
@@ -48,6 +50,10 @@
#define VMWARE_PORT_CMD_VCPU_RESERVED 31
#define VMWARE_PORT_CMD_MESSAGE 30
#define VMWARE_HB_PORT_CMD_MESSAGE 0
+#define VMWARE_PORT_CMD_STEALCLOCK 91
+# define STEALCLOCK_NOT_AVAILABLE (-1)
+# define STEALCLOCK_DISABLED 0
+# define STEALCLOCK_ENABLED 1
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
__asm__("inl (%%dx)" : \
@@ -57,6 +63,18 @@
"2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \
"memory");
+struct vmware_steal_time {
+ union {
+ uint64_t clock; /* stolen time counter in units of vtsc */
+ struct {
+ /* only for little-endian */
+ uint32_t clock_low;
+ uint32_t clock_high;
+ };
+ };
+ uint64_t reserved[7];
+};
+
static unsigned long vmware_tsc_khz __ro_after_init;
static inline int __vmware_platform(void)
@@ -81,6 +99,8 @@ static struct kmsg_dumper kmsg_dumper = {
#ifdef CONFIG_PARAVIRT
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
static int vmw_sched_clock __initdata = 1;
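+/*
+ * The steal time structure is written by the hypervisor, so it must
+ * stay decrypted when memory encryption (SEV/SME) is active.
+ */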
+static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, steal_time) __aligned(64);
+static int has_steal_clock = 0;
static __init int setup_vmw_sched_clock(char *s)
{
@@ -99,7 +119,7 @@ static unsigned long long vmware_sched_clock(void)
return ns;
}
-static void __init vmware_sched_clock_setup(void)
+static void __init vmware_cyc2ns_setup(void)
{
struct cyc2ns_data *d = &vmware_cyc2ns;
unsigned long long tsc_now = rdtsc();
@@ -109,17 +129,202 @@ static void __init vmware_sched_clock_setup(void)
d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
d->cyc2ns_shift);
- pv_time_ops.sched_clock = vmware_sched_clock;
- pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
+ pr_info("using clock offset of %llu ns\n", d->cyc2ns_offset);
}
+static int vmware_cmd_stealclock(uint32_t arg1, uint32_t arg2)
+{
+ uint32_t result, info;
+
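+ /*
+ * VMware backdoor call: magic number in %eax, command in %ecx, I/O
+ * port in %dx, and the two 32-bit arguments in %esi/%edi.
+ */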
+ __asm__ __volatile__ ("inl (%%dx)"
+ : "=a" (result),
+ "=c" (info)
+ : "a" (VMWARE_HYPERVISOR_MAGIC),
+ "c" (VMWARE_PORT_CMD_STEALCLOCK),
+ "d" (VMWARE_HYPERVISOR_PORT),
+ "b" (0),
+ "S" (arg1),
+ "D" (arg2)
+ : "memory");
+ return result;
+}
+
+static int stealclock_enable(phys_addr_t pa)
+{
+ /* Split with {upper,lower}_32_bits(); pa >> 32 is undefined for a 32-bit phys_addr_t. */
+ return vmware_cmd_stealclock(upper_32_bits(pa), lower_32_bits(pa)) == STEALCLOCK_ENABLED;
+}
+
+static int stealclock_disable(void)
+{
+ return vmware_cmd_stealclock(0, 1);
+}
+
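+/*
+ * Disabling is harmless even when steal clock was never enabled, so
+ * the disable command doubles as an availability probe: unsupported
+ * hypervisors return STEALCLOCK_NOT_AVAILABLE.
+ */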
+static int vmware_is_stealclock_available(void)
+{
+ return stealclock_disable() != STEALCLOCK_NOT_AVAILABLE;
+}
+
+/**
+ * vmware_steal_clock() - read the per-cpu steal clock
+ * @cpu: the cpu number whose steal clock we want to read
+ *
+ * The function reads the steal clock if we are on a 64-bit system, otherwise
+ * reads it in parts, checking that the high part didn't change in the
+ * meantime.
+ *
+ * Return:
+ * The steal clock reading in ns.
+ */
+static uint64_t vmware_steal_clock(int cpu)
+{
+ struct vmware_steal_time *steal = &per_cpu(steal_time, cpu);
+ uint64_t clock;
+
+#ifdef CONFIG_64BIT
+ clock = READ_ONCE(steal->clock);
+#else
+ uint32_t initial_high, low, high;
+
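+ /*
+ * A 32-bit guest cannot read the 64-bit counter atomically; retry
+ * until the high word is stable around the low-word read so a torn
+ * value is never returned.
+ */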
+ do {
+ initial_high = READ_ONCE(steal->clock_high);
+ low = READ_ONCE(steal->clock_low);
+ high = READ_ONCE(steal->clock_high);
+ } while (initial_high != high);
+
+ clock = ((uint64_t)high << 32) | low;
+#endif
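+ /* Scale TSC cycles to ns using the same cyc2ns data as sched_clock. */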
+ return mul_u64_u32_shr(clock, vmware_cyc2ns.cyc2ns_mul,
+ vmware_cyc2ns.cyc2ns_shift);
+}
+
+static void vmware_register_steal_time(void)
+{
+ int cpu = smp_processor_id();
+ struct vmware_steal_time *st = &per_cpu(steal_time, cpu);
+
+ if (!has_steal_clock)
+ return;
+
+ memset(st, 0, sizeof(*st));
+
+ if (!stealclock_enable(slow_virt_to_phys(st))) {
+ has_steal_clock = 0;
+ return;
+ }
+
+ pr_info("vmware-stealtime: cpu %d, pa %llx\n",
+ cpu, (unsigned long long) slow_virt_to_phys(st));
+}
+
+static void vmware_disable_steal_time(void)
+{
+ if (!has_steal_clock)
+ return;
+
+ stealclock_disable();
+}
+
+static void vmware_guest_cpu_init(void)
+{
+ if (has_steal_clock)
+ vmware_register_steal_time();
+}
+
+static void vmware_pv_guest_cpu_reboot(void *unused)
+{
+ vmware_disable_steal_time();
+}
+
+static int vmware_pv_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_RESTART)
+ on_each_cpu(vmware_pv_guest_cpu_reboot, NULL, 1);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vmware_pv_reboot_nb = {
+ .notifier_call = vmware_pv_reboot_notify,
+};
+
+#ifdef CONFIG_SMP
+static void __init vmware_smp_prepare_boot_cpu(void)
+{
+ vmware_guest_cpu_init();
+ native_smp_prepare_boot_cpu();
+}
+
+static int vmware_cpu_online(unsigned int cpu)
+{
+ local_irq_disable();
+ vmware_guest_cpu_init();
+ local_irq_enable();
+ return 0;
+}
+
+static int vmware_cpu_down_prepare(unsigned int cpu)
+{
+ local_irq_disable();
+ vmware_disable_steal_time();
+ local_irq_enable();
+ return 0;
+}
+#endif
+
+static int steal_acc __initdata = 1; /* steal time accounting */
+static __init int parse_no_stealacc(char *arg)
+{
+ steal_acc = 0;
+ return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
+static __init int activate_jump_labels(void)
+{
+ if (has_steal_clock) {
+ static_key_slow_inc(&paravirt_steal_enabled);
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+ }
+
+ return 0;
+}
+arch_initcall(activate_jump_labels);
+
static void __init vmware_paravirt_ops_setup(void)
{
pv_info.name = "VMware hypervisor";
pv_cpu_ops.io_delay = paravirt_nop;
- if (vmware_tsc_khz && vmw_sched_clock)
- vmware_sched_clock_setup();
+ if (vmware_tsc_khz == 0)
+ return;
+
+ vmware_cyc2ns_setup();
+
+ if (vmw_sched_clock)
+ pv_time_ops.sched_clock = vmware_sched_clock;
+
+ if (vmware_is_stealclock_available()) {
+ has_steal_clock = 1;
+ pv_time_ops.steal_clock = vmware_steal_clock;
+ }
+
+ if (has_steal_clock) {
+ /* We use reboot notifier only to disable steal clock */
+ register_reboot_notifier(&vmware_pv_reboot_nb);
+#ifdef CONFIG_SMP
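+ /*
+ * The boot CPU registers its steal time area via
+ * smp_prepare_boot_cpu(); secondary CPUs are handled by the
+ * hotplug callbacks below.
+ */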
+ smp_ops.smp_prepare_boot_cpu = vmware_smp_prepare_boot_cpu;
+ if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "x86/vmware:online",
+ vmware_cpu_online,
+ vmware_cpu_down_prepare) < 0)
+ pr_err("vmware_guest: Failed to install cpu hotplug callbacks\n");
+#else
+ vmware_guest_cpu_init();
+#endif
+ }
}
#else
#define vmware_paravirt_ops_setup() do {} while (0)