diff --git a/repos/ports/lib/mk/spec/nova/virtualbox5-nova.mk b/repos/ports/lib/mk/spec/nova/virtualbox5-nova.mk
index 6f0e9570f..dadb3f777 100644
--- a/repos/ports/lib/mk/spec/nova/virtualbox5-nova.mk
+++ b/repos/ports/lib/mk/spec/nova/virtualbox5-nova.mk
@@ -2,7 +2,7 @@ include $(REP_DIR)/lib/mk/virtualbox5-common.inc
LIBS += stdcxx
-SRC_CC = sup.cc pgm.cc
+SRC_CC = sup.cc pgm.cc sup_old.cc
INC_DIR += $(call select_from_repositories,src/lib/libc)
@@ -16,5 +16,6 @@ INC_DIR += $(REP_DIR)/src/virtualbox5/frontend
vpath sup.cc $(REP_DIR)/src/virtualbox5/spec/nova/
vpath pgm.cc $(REP_DIR)/src/virtualbox5/spec/nova/
+vpath sup_old.cc $(REP_DIR)/src/virtualbox5/spec/nova/
CC_CXX_WARN_STRICT =
diff --git a/repos/ports/lib/mk/virtualbox5-hwaccl-off.mk b/repos/ports/lib/mk/virtualbox5.mk
similarity index 71%
rename from repos/ports/lib/mk/virtualbox5-hwaccl-off.mk
rename to repos/ports/lib/mk/virtualbox5.mk
index a445a9cff..ad406d8b8 100644
--- a/repos/ports/lib/mk/virtualbox5-hwaccl-off.mk
+++ b/repos/ports/lib/mk/virtualbox5.mk
@@ -2,7 +2,7 @@ include $(REP_DIR)/lib/mk/virtualbox5-common.inc
LIBS += stdcxx
-SRC_CC = sup.cc
+SRC_CC = sup.cc sup_vmm.cc
INC_DIR += $(call select_from_repositories,src/lib/libc)
@@ -13,9 +13,8 @@ INC_DIR += $(VBOX_DIR)/Main/include
INC_DIR += $(VBOX_DIR)/VMM/include
INC_DIR += $(REP_DIR)/src/virtualbox5
INC_DIR += $(REP_DIR)/src/virtualbox5/frontend
-INC_DIR += $(REP_DIR)/src/virtualbox5/accloff
-#vpath pgm.cc $(REP_DIR)/src/virtualbox5/
-vpath sup.cc $(REP_DIR)/src/virtualbox5/accloff/
+vpath sup_vmm.cc $(REP_DIR)/src/virtualbox5/generic
+vpath sup.cc $(REP_DIR)/src/virtualbox5/generic
CC_CXX_WARN_STRICT =
diff --git a/repos/ports/ports/virtualbox5.hash b/repos/ports/ports/virtualbox5.hash
index 4e564ada8..13bfd34ec 100644
--- a/repos/ports/ports/virtualbox5.hash
+++ b/repos/ports/ports/virtualbox5.hash
@@ -1 +1 @@
-0db2b02901c5b62cd181d817ba71c3b0d417a40a
+0ccf5764631d03e73d2a952391c96fe457f6f5ee
diff --git a/repos/ports/recipes/src/vbox5/content.mk b/repos/ports/recipes/src/vbox5/content.mk
new file mode 100644
index 000000000..ec9508d66
--- /dev/null
+++ b/repos/ports/recipes/src/vbox5/content.mk
@@ -0,0 +1,84 @@
+LIB_MK_FILES := $(notdir $(wildcard $(REP_DIR)/lib/mk/virtualbox5*))
+
+MIRROR_FROM_REP_DIR := src/virtualbox5 \
+ src/virtualbox5/network.cpp \
+ src/virtualbox5/include \
+ include/vmm \
+ $(addprefix lib/mk/,$(LIB_MK_FILES))
+
+content: $(MIRROR_FROM_REP_DIR)
+
+$(MIRROR_FROM_REP_DIR):
+ $(mirror_from_rep_dir)
+
+# omit virtualbox5-nova binary (12 MiB) from binary archive
+content: disable_virtualbox_nova disable_assertions
+
+disable_virtualbox_nova: $(MIRROR_FROM_REP_DIR)
+ rm src/virtualbox5/nova/target.mk
+ rmdir src/virtualbox5/nova
+ rm -r src/virtualbox5/spec
+
+disable_assertions: $(MIRROR_FROM_REP_DIR)
+ rm lib/mk/virtualbox5-debug.inc
+ touch lib/mk/virtualbox5-debug.inc
+
+PORT_DIR := $(call port_dir,$(REP_DIR)/ports/virtualbox5)
+
+MIRROR_FROM_PORT_DIR := src/app/virtualbox src/app/virtualbox_sdk \
+ VBoxAPIWrap VirtualBox_stripped.xidl
+
+content: $(MIRROR_FROM_PORT_DIR)
+
+$(MIRROR_FROM_PORT_DIR):
+ mkdir -p $(dir $@)
+ cp -r $(PORT_DIR)/$@ $(dir $@)
+
+MIRROR_FROM_LIBPORTS := lib/mk/libc_pipe.mk \
+ src/lib/libc_pipe \
+ lib/mk/libc_terminal.mk \
+ src/lib/libc_terminal \
+ lib/mk/libc-mem.mk \
+ lib/mk/libc-common.inc \
+ src/lib/libc/libc_mem_alloc.cc \
+ src/lib/libc/libc_mem_alloc.h \
+ src/lib/libc/libc_init.h \
+ src/lib/libc/thread_create.h \
+ src/lib/libc/thread.h \
+ include/libc-plugin \
+ lib/import/import-qemu-usb_include.mk \
+ lib/mk/qemu-usb_include.mk \
+ lib/mk/qemu-usb.mk \
+ include/qemu \
+ src/lib/qemu-usb
+
+content: $(MIRROR_FROM_LIBPORTS)
+
+$(MIRROR_FROM_LIBPORTS):
+ mkdir -p $(dir $@)
+ cp -r $(GENODE_DIR)/repos/libports/$@ $(dir $@)
+
+QEMU_USB_PORT_DIR := $(call port_dir,$(GENODE_DIR)/repos/libports/ports/qemu-usb)
+
+MIRROR_FROM_QEMU_USB_PORT_DIR := src/lib/qemu
+
+content: $(MIRROR_FROM_QEMU_USB_PORT_DIR)
+
+$(MIRROR_FROM_QEMU_USB_PORT_DIR):
+ mkdir -p $(dir $@)
+ cp -r $(QEMU_USB_PORT_DIR)/$@ $(dir $@)
+
+MIRROR_FROM_OS := src/drivers/input/spec/ps2/scan_code_set_1.h \
+ include/pointer/shape_report.h
+
+content: $(MIRROR_FROM_OS)
+
+$(MIRROR_FROM_OS):
+ mkdir -p $(dir $@)
+ cp -r $(GENODE_DIR)/repos/os/$@ $(dir $@)
+
+content: LICENSE
+
+LICENSE:
+ echo "GNU GPL version 2, see src/app/virtualbox/COPYING" > $@
+
diff --git a/repos/ports/recipes/src/vbox5/hash b/repos/ports/recipes/src/vbox5/hash
new file mode 100644
index 000000000..5f0a1b832
--- /dev/null
+++ b/repos/ports/recipes/src/vbox5/hash
@@ -0,0 +1 @@
+2019-05-20-d d04357716c16c4a70e53bba190ef5a73c4e64b51
diff --git a/repos/ports/recipes/src/vbox5/used_apis b/repos/ports/recipes/src/vbox5/used_apis
new file mode 100644
index 000000000..4bd26ca9a
--- /dev/null
+++ b/repos/ports/recipes/src/vbox5/used_apis
@@ -0,0 +1,19 @@
+base
+os
+libc
+so
+vfs
+libiconv
+stdcxx
+timer_session
+usb_session
+terminal_session
+audio_in_session
+audio_out_session
+nic_session
+input_session
+framebuffer_session
+report_session
+nitpicker_session
+rtc_session
+vm_session
diff --git a/repos/ports/run/test.vbox b/repos/ports/run/test.vbox
index 022421e77..26db043fb 100644
--- a/repos/ports/run/test.vbox
+++ b/repos/ports/run/test.vbox
@@ -32,7 +32,7 @@
-
+
diff --git a/repos/ports/run/vbox5_ubuntu_14_04_32.run b/repos/ports/run/vbox5_ubuntu_14_04_32.run
index 096b9830b..29d6a9554 100644
--- a/repos/ports/run/vbox5_ubuntu_14_04_32.run
+++ b/repos/ports/run/vbox5_ubuntu_14_04_32.run
@@ -19,4 +19,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 1
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_ubuntu_14_04_64.run b/repos/ports/run/vbox5_ubuntu_14_04_64.run
index 1ca124a45..332f2ef5d 100644
--- a/repos/ports/run/vbox5_ubuntu_14_04_64.run
+++ b/repos/ports/run/vbox5_ubuntu_14_04_64.run
@@ -21,4 +21,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 1
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_ubuntu_16_04_32.run b/repos/ports/run/vbox5_ubuntu_16_04_32.run
index 06d76e3a8..a63879d2a 100644
--- a/repos/ports/run/vbox5_ubuntu_16_04_32.run
+++ b/repos/ports/run/vbox5_ubuntu_16_04_32.run
@@ -19,4 +19,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 1
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_ubuntu_16_04_64.run b/repos/ports/run/vbox5_ubuntu_16_04_64.run
index 84a837db9..660bf65d1 100644
--- a/repos/ports/run/vbox5_ubuntu_16_04_64.run
+++ b/repos/ports/run/vbox5_ubuntu_16_04_64.run
@@ -21,4 +21,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 1
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_vm_ubuntu_16_04_32.run b/repos/ports/run/vbox5_vm_ubuntu_16_04_32.run
new file mode 100644
index 000000000..57f6ba053
--- /dev/null
+++ b/repos/ports/run/vbox5_vm_ubuntu_16_04_32.run
@@ -0,0 +1,25 @@
+#
+# Ubuntu 16.04 32bit in VBox 5
+#
+
+set flavor "ubuntu_16_04_32"
+set vm_ram "1280M"
+
+set use_vbox5 1
+
+set use_rumpfs 1
+# Write overlay only into ram
+set use_ram_fs 1
+# However read initial overlay from disk
+set use_overlay_from_disk 1
+
+set use_usb 1
+set use_ps2 [have_spec ps2]
+
+set use_vms 1
+set use_cpu_load 1
+
+# use generic vbox5 VMM version
+set use_vbox5_nova 0
+
+source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_vm_ubuntu_16_04_64.run b/repos/ports/run/vbox5_vm_ubuntu_16_04_64.run
new file mode 100644
index 000000000..944e73492
--- /dev/null
+++ b/repos/ports/run/vbox5_vm_ubuntu_16_04_64.run
@@ -0,0 +1,27 @@
+#
+# Ubuntu 16.04 64bit in VBox 5
+#
+
+assert_spec 64bit
+
+set flavor "ubuntu_16_04_64"
+set vm_ram "9460M"
+
+set use_vbox5 1
+
+set use_rumpfs 1
+# Write overlay only into ram
+set use_ram_fs 1
+# However read initial overlay from disk
+set use_overlay_from_disk 1
+
+set use_usb 1
+set use_ps2 [have_spec ps2]
+
+set use_vms 1
+set use_cpu_load 1
+
+# use generic vbox5 VMM version
+set use_vbox5_nova 0
+
+source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_vm_win7_32.run b/repos/ports/run/vbox5_vm_win7_32.run
new file mode 100644
index 000000000..dcdc1d290
--- /dev/null
+++ b/repos/ports/run/vbox5_vm_win7_32.run
@@ -0,0 +1,27 @@
+#
+# Windows 7 32bit in VirtualBox 5
+#
+
+assert_spec nova
+
+set flavor "win7"
+set vm_ram "1280M"
+
+set use_vbox5 1
+
+set use_rumpfs 1
+# Write overlay only into ram
+set use_ram_fs 1
+# However read initial overlay from disk
+set use_overlay_from_disk 1
+
+set use_usb 1
+set use_ps2 [have_spec ps2]
+
+set use_vms 1
+set use_cpu_load 0
+
+# use generic vbox5 VMM version
+set use_vbox5_nova 0
+
+source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_vm_win7_64.run b/repos/ports/run/vbox5_vm_win7_64.run
new file mode 100644
index 000000000..4012202a9
--- /dev/null
+++ b/repos/ports/run/vbox5_vm_win7_64.run
@@ -0,0 +1,27 @@
+#
+# Windows 7 64bit in VirtualBox
+#
+
+assert_spec 64bit
+
+set flavor "win7_64"
+set vm_ram "9480M"
+
+set use_vbox5 1
+
+set use_rumpfs 1
+# Write overlay only into ram
+set use_ram_fs 1
+# However read initial overlay from disk
+set use_overlay_from_disk 1
+
+set use_usb 1
+set use_ps2 [have_spec ps2]
+
+set use_vms 1
+set use_cpu_load 0
+
+# use generic vbox5 VMM version
+set use_vbox5_nova 0
+
+source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_win10_64.run b/repos/ports/run/vbox5_win10_64.run
index f5c9f2b92..271e1c5e1 100644
--- a/repos/ports/run/vbox5_win10_64.run
+++ b/repos/ports/run/vbox5_win10_64.run
@@ -22,4 +22,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 0
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_win7_32.run b/repos/ports/run/vbox5_win7_32.run
index b311ce416..f96d5985d 100644
--- a/repos/ports/run/vbox5_win7_32.run
+++ b/repos/ports/run/vbox5_win7_32.run
@@ -21,4 +21,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 0
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_win7_64.run b/repos/ports/run/vbox5_win7_64.run
index ba7dfc2c6..eaf0aaeef 100644
--- a/repos/ports/run/vbox5_win7_64.run
+++ b/repos/ports/run/vbox5_win7_64.run
@@ -21,4 +21,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 0
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_win7_64_multiple.run b/repos/ports/run/vbox5_win7_64_multiple.run
index 6a2d79306..8a4735cf6 100644
--- a/repos/ports/run/vbox5_win7_64_multiple.run
+++ b/repos/ports/run/vbox5_win7_64_multiple.run
@@ -33,4 +33,7 @@ set use_vms 6
set affinity_space_width 8
set use_cpu_load 0
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox5_win7_64_raw.run b/repos/ports/run/vbox5_win7_64_raw.run
index e048e621c..7e17baf3a 100644
--- a/repos/ports/run/vbox5_win7_64_raw.run
+++ b/repos/ports/run/vbox5_win7_64_raw.run
@@ -20,4 +20,7 @@ set use_ps2 [have_spec ps2]
set use_vms 1
set use_cpu_load 0
+# use non-generic vbox5 VMM version
+set use_vbox5_nova 1
+
source ${genode_dir}/repos/ports/run/vbox_win.inc
diff --git a/repos/ports/run/vbox_win.inc b/repos/ports/run/vbox_win.inc
index e7e91f7ca..938d39eb6 100644
--- a/repos/ports/run/vbox_win.inc
+++ b/repos/ports/run/vbox_win.inc
@@ -39,11 +39,8 @@ set boot_modules {
dynamic_rom
}
-set virtualbox5_binary "virtualbox5-rem"
-
-if {[have_spec nova]} {
- set virtualbox5_binary "virtualbox5-nova"
-}
+set virtualbox5_binary "virtualbox5"
+if {$use_vbox5_nova} { set virtualbox5_binary "virtualbox5-nova" }
set config_of_app {
diff --git a/repos/ports/run/virtualbox.run b/repos/ports/run/virtualbox.run
index f42b1fb89..de6463a90 100644
--- a/repos/ports/run/virtualbox.run
+++ b/repos/ports/run/virtualbox.run
@@ -2,8 +2,9 @@ set use_net 0
set use_ps2 [have_spec ps2]
set use_usb 0
set use_serial 1
+set use_top 1
-set use_vbox5 1
+set use_vbox5_nova 0
# use_gui starts two VMs
set use_gui 0
@@ -15,10 +16,12 @@ set build_components {
server/report_rom
}
-if {$use_vbox5} {
- append build_components virtualbox5
- set virtualbox_binary "virtualbox5-rem"
- if {[have_spec nova]} { set virtualbox_binary "virtualbox5-nova" }
+append build_components virtualbox5
+
+set virtualbox_binary "virtualbox5"
+
+if {$use_vbox5_nova} {
+ set virtualbox_binary "virtualbox5-nova"
}
create_boot_directory
@@ -31,6 +34,7 @@ source ${genode_dir}/repos/base/run/platform_drv.inc
# override defaults of platform_drv.inc
proc platform_drv_priority {} { return { priority="-1"} }
+lappend_if [expr $use_top] build_components app/top
lappend_if [expr $use_ps2] build_components drivers/input
lappend_if [expr $use_usb] build_components drivers/usb
lappend_if [expr $use_serial] build_components server/log_terminal
@@ -53,11 +57,12 @@ set config {
- }
-
-append_if [have_spec muen] config {
+
}
+append_if [expr $use_top] config {
+ }
+
append config {
@@ -69,11 +74,17 @@ append config {
}
+append_if [expr $use_top] config {
+
+
+
+ }
+
append_platform_drv_config
append_if [expr $use_ps2] config {
-
+
}
@@ -82,13 +93,8 @@ append_if [expr $use_usb] config {
}
-append_if [expr $use_usb && ![have_spec muen]] config {
- }
-
-append_if [expr $use_usb && [have_spec muen]] config {
- }
-
append_if [expr $use_usb] config {
+
}
@@ -97,8 +103,12 @@ append_if [have_spec framebuffer] config {
-
-
+ }
+append_if [expr [have_spec framebuffer] && [have_include power_on/qemu]] config {
+ }
+append_if [expr [have_spec framebuffer] && [have_include power_on/qemu]] config {
+ }
+append_if [have_spec framebuffer] config {
}
append_if [have_spec sdl] config {
@@ -196,11 +206,8 @@ append config {
}
append_if [expr $use_gui] config {
- }
-
-append_if [expr $use_gui] config "
- "
-append_if [expr $use_gui] config {
+
+
@@ -221,10 +228,8 @@ append_if [expr $use_gui] config {
}
append config {
- }
-append config "
- "
-append config {
+
+
@@ -269,7 +274,7 @@ lappend_if [expr $use_ps2] boot_modules ps2_drv
lappend_if [have_spec framebuffer] boot_modules vesa_fb_drv
lappend_if [have_spec linux] boot_modules fb_sdl
lappend_if [have_spec x86] boot_modules rtc_drv
-
+lappend_if [expr $use_top] boot_modules top
lappend_if [expr $use_gui] boot_modules report_rom
append boot_modules {
diff --git a/repos/ports/run/virtualbox_auto.inc b/repos/ports/run/virtualbox_auto.inc
index 5918bdf9a..6fb09bafa 100644
--- a/repos/ports/run/virtualbox_auto.inc
+++ b/repos/ports/run/virtualbox_auto.inc
@@ -81,9 +81,7 @@ append config {
- }
-
-append_if [have_spec muen] config {
+
}
append_if [expr $use_cpu_load] config {
diff --git a/repos/ports/src/virtualbox5/accloff/sup.cc b/repos/ports/src/virtualbox5/accloff/sup.cc
deleted file mode 100644
index 032d49b2a..000000000
--- a/repos/ports/src/virtualbox5/accloff/sup.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * \brief Genode specific VirtualBox SUPLib supplements.
- * File used by Genode platforms not supporting hardware
- * virtualisation features.
- * \author Alexander Boettcher
- * \date 2013-11-18
- */
-
-/*
- * Copyright (C) 2013 Genode Labs GmbH
- *
- * This file is distributed under the terms of the GNU General Public License
- * version 2.
- */
-
-/* Genode includes */
-#include
-#include
-#include
-
-/* VirtualBox includes */
-#include
-#include
-
-/* Genode's VirtualBox includes */
-#include "sup.h"
-#include "vmm.h"
-
-/* Libc include */
-#include
-#include /* sched_yield */
-
-
-/* VirtualBox SUPLib interface */
-
-int SUPR3QueryVTxSupported(void)
-{
- return VERR_INTERNAL_ERROR;
-}
-
-int SUPR3CallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation, VMCPUID idCpu)
-{
- return VERR_INTERNAL_ERROR;
-}
-
-
-static Genode::Semaphore *r0_halt_sem()
-{
- static Genode::Semaphore sem;
- return &sem;
-}
-
-
-int SUPR3CallVMMR0Ex(PVMR0 pVMR0, VMCPUID idCpu, unsigned
- uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
-{
- switch(uOperation)
- {
- case VMMR0_DO_GVMM_CREATE_VM:
- genode_VMMR0_DO_GVMM_CREATE_VM(pReqHdr);
- return VINF_SUCCESS;
-
- case VMMR0_DO_GVMM_SCHED_HALT:
- r0_halt_sem()->down();
- return VINF_SUCCESS;
-
- case VMMR0_DO_GVMM_SCHED_WAKE_UP:
- r0_halt_sem()->up();
- return VINF_SUCCESS;
-
- case VMMR0_DO_VMMR0_INIT:
- return VINF_SUCCESS;
-
- case VMMR0_DO_GVMM_SCHED_POLL:
- /* called by 'vmR3HaltGlobal1Halt' */
- Genode::log(__func__, ": SUPR3CallVMMR0Ex: VMMR0_DO_GVMM_SCHED_POLL");
- return VINF_SUCCESS;
-
- default:
- Genode::error("SUPR3CallVMMR0Ex: unhandled uOperation ", (int)uOperation);
- return VERR_GENERAL_FAILURE;
- }
-}
-
-
-bool create_emt_vcpu(pthread_t * thread, size_t stack_size,
- void *(*start_routine)(void *), void *arg,
- Genode::Cpu_session * cpu_session,
- Genode::Affinity::Location location,
- unsigned int cpu_id,
- const char * name)
-{
- /* no hardware acceleration support */
- return false;
-}
-
-
-/**
- * Dummies and unimplemented stuff.
- */
-
-uint64_t genode_cpu_hz() {
- return 1000000000ULL; /* XXX fixed 1GHz return value */
-}
-
-
-void genode_update_tsc(void (*update_func)(void), Genode::uint64_t update_us)
-{
- using namespace Genode;
-
- Timer::Connection timer(genode_env());
- Signal_context sig_ctx;
- Signal_receiver sig_rec;
- Signal_context_capability sig_cap = sig_rec.manage(&sig_ctx);
-
- timer.sigh(sig_cap);
- timer.trigger_once(update_us);
-
- for (;;) {
- Signal s = sig_rec.wait_for_signal();
- update_func();
-
- timer.trigger_once(update_us);
- }
-}
-
-
-HRESULT genode_setup_machine(ComObjPtr machine)
-{
- ULONG memory_vbox;
- HRESULT rc = machine->COMGETTER(MemorySize)(&memory_vbox);
- if (FAILED(rc))
- return rc;
-
- size_t const vmm_memory = 1024ULL * 1024 * (memory_vbox + 16);
-
- return genode_check_memory_config(machine, vmm_memory);
-}
-
-
-extern "C" int sched_yield(void)
-{
- Genode::warning(__func__, " unimplemented");
- return -1;
-}
-
-int SUPR3PageAllocEx(::size_t cPages, uint32_t fFlags, void **ppvPages,
- PRTR0PTR pR0Ptr, PSUPPAGE paPages)
-{
- Genode::error(__func__, " unimplemented");
- return VERR_GENERAL_FAILURE;
-}
-
-extern "C" bool PGMUnmapMemoryGenode(void *, ::size_t)
-{
- Genode::error(__func__, " unimplemented");
- return VERR_GENERAL_FAILURE;
-}
-
-extern "C" void PGMFlushVMMemory()
-{
- Genode::error(__func__, " unimplemented");
-}
diff --git a/repos/ports/src/virtualbox5/generic/sup.cc b/repos/ports/src/virtualbox5/generic/sup.cc
new file mode 100644
index 000000000..b54d7548f
--- /dev/null
+++ b/repos/ports/src/virtualbox5/generic/sup.cc
@@ -0,0 +1,227 @@
+/*
+ * \brief VirtualBox SUPLib supplements
+ * \author Norman Feske
+ * \date 2013-08-20
+ */
+
+/*
+ * Copyright (C) 2013-2017 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+/* Genode includes */
+#include
+#include
+
+/* Genode/Virtualbox includes */
+#include "sup.h"
+#include "vmm.h"
+
+/* VirtualBox includes */
+#include
+#include
+#include
+
+
+SUPR3DECL(SUPPAGINGMODE) SUPR3GetPagingMode(void)
+{
+ return sizeof(void *) == 4 ? SUPPAGINGMODE_32_BIT : SUPPAGINGMODE_AMD64_NX;
+}
+
+
+int SUPR3Term(bool) { return VINF_SUCCESS; }
+
+
+int SUPR3HardenedLdrLoadAppPriv(const char *pszFilename, PRTLDRMOD phLdrMod,
+ uint32_t fFlags, PRTERRINFO pErrInfo)
+{
+ return RTLdrLoad(pszFilename, phLdrMod);
+}
+
+
+SUPR3DECL(int) SUPR3PageFreeEx(void *pvPages, size_t cPages)
+{
+ Genode::log(__func__, " pvPages=", pvPages, " pages=", cPages);
+ return VINF_SUCCESS;
+}
+
+
+int SUPR3QueryMicrocodeRev(uint32_t *puMicrocodeRev)
+{
+ return E_FAIL;
+}
+
+uint32_t SUPSemEventMultiGetResolution(PSUPDRVSESSION)
+{
+ return 100000*10; /* called by 'vmR3HaltGlobal1Init' */
+}
+
+
+int SUPSemEventCreate(PSUPDRVSESSION pSession, PSUPSEMEVENT phEvent)
+{
+ return RTSemEventCreate((PRTSEMEVENT)phEvent);
+}
+
+
+int SUPSemEventClose(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent)
+{
+ Assert (hEvent);
+
+ return RTSemEventDestroy((RTSEMEVENT)hEvent);
+}
+
+
+int SUPSemEventSignal(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent)
+{
+ Assert (hEvent);
+
+ return RTSemEventSignal((RTSEMEVENT)hEvent);
+}
+
+
+int SUPSemEventWaitNoResume(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent,
+ uint32_t cMillies)
+{
+ Assert (hEvent);
+
+ return RTSemEventWaitNoResume((RTSEMEVENT)hEvent, cMillies);
+}
+
+
+int SUPSemEventMultiCreate(PSUPDRVSESSION, PSUPSEMEVENTMULTI phEventMulti)
+{
+ RTSEMEVENTMULTI sem;
+
+ /*
+ * Input validation.
+ */
+ AssertPtrReturn(phEventMulti, VERR_INVALID_POINTER);
+
+ /*
+ * Create the event semaphore object.
+ */
+ int rc = RTSemEventMultiCreate(&sem);
+
+ static_assert(sizeof(sem) == sizeof(*phEventMulti), "oi");
+ *phEventMulti = reinterpret_cast<SUPSEMEVENTMULTI>(sem);
+ return rc;
+}
+
+
+int SUPSemEventMultiWaitNoResume(PSUPDRVSESSION, SUPSEMEVENTMULTI event,
+ uint32_t ms)
+{
+ RTSEMEVENTMULTI const rtevent = reinterpret_cast<RTSEMEVENTMULTI>(event);
+ return RTSemEventMultiWait(rtevent, ms);
+}
+
+int SUPSemEventMultiSignal(PSUPDRVSESSION, SUPSEMEVENTMULTI event) {
+ return RTSemEventMultiSignal(reinterpret_cast<RTSEMEVENTMULTI>(event)); }
+
+int SUPSemEventMultiReset(PSUPDRVSESSION, SUPSEMEVENTMULTI event) {
+ return RTSemEventMultiReset(reinterpret_cast<RTSEMEVENTMULTI>(event)); }
+
+int SUPSemEventMultiClose(PSUPDRVSESSION, SUPSEMEVENTMULTI event) {
+ return RTSemEventMultiDestroy(reinterpret_cast<RTSEMEVENTMULTI>(event)); }
+
+
+int SUPR3CallVMMR0(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation,
+ void *pvArg)
+{
+ if (uOperation == VMMR0_DO_CALL_HYPERVISOR) {
+ Genode::log(__func__, ": VMMR0_DO_CALL_HYPERVISOR - doing nothing");
+ return VINF_SUCCESS;
+ }
+ if (uOperation == VMMR0_DO_VMMR0_TERM) {
+ Genode::log(__func__, ": VMMR0_DO_VMMR0_TERM - doing nothing");
+ return VINF_SUCCESS;
+ }
+ if (uOperation == VMMR0_DO_GVMM_DESTROY_VM) {
+ Genode::log(__func__, ": VMMR0_DO_GVMM_DESTROY_VM - doing nothing");
+ return VINF_SUCCESS;
+ }
+
+ AssertMsg(uOperation != VMMR0_DO_VMMR0_TERM &&
+ uOperation != VMMR0_DO_CALL_HYPERVISOR &&
+ uOperation != VMMR0_DO_GVMM_DESTROY_VM,
+ ("SUPR3CallVMMR0: unhandled uOperation %d", uOperation));
+ return VERR_GENERAL_FAILURE;
+}
+
+
+void genode_VMMR0_DO_GVMM_CREATE_VM(PSUPVMMR0REQHDR pReqHdr)
+{
+ GVMMCREATEVMREQ &req = reinterpret_cast<GVMMCREATEVMREQ &>(*pReqHdr);
+
+ size_t const cCpus = req.cCpus;
+
+ /*
+ * Allocate and initialize VM struct
+ *
+ * The VM struct is followed by the variable-sized array of VMCPU
+ * objects. 'RT_UOFFSETOF' is used to determine the size including
+ * the VMCPU array.
+ *
+ * VM struct must be page-aligned, which is checked at least in
+ * PDMR3CritSectGetNop().
+ */
+ size_t const cbVM = RT_UOFFSETOF(VM, aCpus[cCpus]);
+
+ static Genode::Attached_ram_dataspace vm(genode_env().ram(),
+ genode_env().rm(),
+ cbVM);
+ Assert (vm.size() >= cbVM);
+
+ VM *pVM = vm.local_addr<VM>();
+ Genode::memset(pVM, 0, cbVM);
+
+ /*
+ * On Genode, VMMR0 and VMMR3 share a single address space. Hence, the
+ * same pVM pointer is valid as pVMR0 and pVMR3.
+ */
+ pVM->enmVMState = VMSTATE_CREATING;
+ pVM->pVMR0 = (RTHCUINTPTR)pVM;
+ pVM->pVMRC = (RTGCUINTPTR)pVM;
+ pVM->pSession = req.pSession;
+ pVM->cbSelf = cbVM;
+ pVM->cCpus = cCpus;
+ pVM->uCpuExecutionCap = 100; /* expected by 'vmR3CreateU()' */
+ pVM->offVMCPU = RT_UOFFSETOF(VM, aCpus);
+
+ for (uint32_t i = 0; i < cCpus; i++) {
+ pVM->aCpus[i].pVMR0 = pVM->pVMR0;
+ pVM->aCpus[i].pVMR3 = pVM;
+ pVM->aCpus[i].idHostCpu = NIL_RTCPUID;
+ pVM->aCpus[i].hNativeThreadR0 = NIL_RTNATIVETHREAD;
+ }
+
+ pVM->aCpus[0].hNativeThreadR0 = RTThreadNativeSelf();
+
+ /* out parameters of the request */
+ req.pVMR0 = pVM->pVMR0;
+ req.pVMR3 = pVM;
+}
+
+
+void genode_VMMR0_DO_GVMM_REGISTER_VMCPU(PVMR0 pVMR0, VMCPUID idCpu)
+{
+ PVM pVM = reinterpret_cast<PVM>(pVMR0);
+ pVM->aCpus[idCpu].hNativeThreadR0 = RTThreadNativeSelf();
+}
+
+
+HRESULT genode_check_memory_config(ComObjPtr<Machine>,
+ size_t const memory_vmm)
+{
+ /* Request max available memory */
+ size_t const memory_available = genode_env().pd().avail_ram().value;
+
+ if (memory_vmm <= memory_available)
+ return S_OK;
+
+ Genode::error("Available memory too low to start the VM - available: ",
+ memory_available, " bytes < ", memory_vmm, " bytes requested");
+ return E_FAIL;
+}
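
Note: genode_VMMR0_DO_GVMM_CREATE_VM above allocates the VM struct together with its trailing, variable-sized VMCPU array in a single dataspace and uses RT_UOFFSETOF(VM, aCpus[cCpus]) as the allocation size. The standalone sketch below illustrates that flexible-trailing-array sizing idea with simplified stand-in structs; Vm and Cpu are illustrative placeholders, not the real VirtualBox types.

    #include <cstddef>
    #include <cstdio>

    /* simplified stand-ins for the real VM/VMCPU structs (illustration only) */
    struct Cpu { unsigned long state[8]; };
    struct Vm  { unsigned flags; unsigned cCpus; Cpu aCpus[1]; };

    int main()
    {
        unsigned const cCpus = 4;

        /*
         * Portable equivalent of RT_UOFFSETOF(VM, aCpus[cCpus]): the offset of
         * the element one past the last CPU equals the required allocation size.
         */
        size_t const cbVm = offsetof(Vm, aCpus) + cCpus * sizeof(Cpu);

        printf("allocation size for %u CPUs: %zu bytes\n", cCpus, cbVm);
        return 0;
    }
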
diff --git a/repos/ports/src/virtualbox5/generic/sup_vmm.cc b/repos/ports/src/virtualbox5/generic/sup_vmm.cc
new file mode 100644
index 000000000..06d83505f
--- /dev/null
+++ b/repos/ports/src/virtualbox5/generic/sup_vmm.cc
@@ -0,0 +1,1266 @@
+/*
+ * \brief Genode specific VirtualBox SUPLib supplements
+ * \author Alexander Boettcher
+ * \author Norman Feske
+ * \author Christian Helmuth
+ */
+
+/*
+ * Copyright (C) 2006-2013 Oracle Corporation
+ * Copyright (C) 2013-2019 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+/* Genode includes */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Genode's VirtualBox includes */
+#include "EMInternal.h" /* enable access to em.s.* */
+#include "vmm.h"
+#include "vcpu.h"
+#include "vcpu_svm.h"
+#include "vcpu_vmx.h"
+
+/* libc memory allocator */
+#include
+
+/* Genode libc pthread binding */
+#include
+
+/* libc */
+#include
+#include /* sched_yield */
+
+#include "mm.h"
+
+/* VirtualBox includes */
+#include
+#include
+
+extern "C" void PGMUnmapMemoryGenode(void *, RTGCPHYS, ::size_t);
+extern "C" void PGMFlushVMMemory();
+
+enum { VERBOSE_PGM = false };
+
+/*
+ * Tracking required to fulfill VMM allocation requests of VM memory
+ */
+enum {
+ CHUNKID_PAGE_START = 1,
+ CHUNKID_PAGE_END = 2,
+ CHUNKID_START = CHUNKID_PAGE_END + 1,
+
+ ONE_PAGE_SIZE = 4096,
+ PAGES_SUPERPAGE = GMM_CHUNK_SIZE / ONE_PAGE_SIZE,
+ HANDY_PAGES = PAGES_SUPERPAGE * (CHUNKID_PAGE_END - CHUNKID_PAGE_START + 1),
+
+ MAX_VM_MEMORY = 16ULL * 1024 * 1024 * 1024, /* 16 GiB */
+ MAX_CHUNK_IDS = MAX_VM_MEMORY / GMM_CHUNK_SIZE,
+};
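
For orientation, these constants imply the following geometry, assuming VirtualBox's GMM_CHUNK_SIZE of 2 MiB (an assumption for illustration, the value is not stated in this patch): one chunk spans 512 pages, and the 16 GiB VM-memory cap corresponds to 8192 chunk IDs. A minimal standalone check of that arithmetic:

    #include <cstdio>

    int main()
    {
        /* assumed GMM chunk size of 2 MiB */
        unsigned long long const GMM_CHUNK_SIZE = 2ULL * 1024 * 1024;
        unsigned long long const ONE_PAGE_SIZE  = 4096;
        unsigned long long const MAX_VM_MEMORY  = 16ULL * 1024 * 1024 * 1024;

        /* pages per chunk (superpage): 2 MiB / 4 KiB = 512 */
        unsigned long long const pages_per_chunk = GMM_CHUNK_SIZE / ONE_PAGE_SIZE;

        /* chunk IDs needed to cover the 16 GiB limit: 8192 */
        unsigned long long const max_chunk_ids = MAX_VM_MEMORY / GMM_CHUNK_SIZE;

        printf("pages per chunk: %llu, max chunk IDs: %llu\n",
               pages_per_chunk, max_chunk_ids);
        return 0;
    }
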
+
+typedef Genode::Bit_allocator<HANDY_PAGES> Page_ids;
+typedef Genode::Bit_array<PAGES_SUPERPAGE> Free_ids;
+
+class Chunk_ids: public Genode::Bit_allocator<MAX_CHUNK_IDS>
+{
+ public:
+
+ void reserve(Genode::addr_t bit_start, size_t const num) {
+ _reserve(bit_start, num); };
+};
+
+static Page_ids page_ids;
+static Chunk_ids chunk_ids;
+
+struct Region : Genode::List<Region>::Element
+{
+ Genode::uint64_t vmm_local;
+ Genode::uint64_t size;
+ Genode::Ram_dataspace_capability cap;
+
+ Region(uint64_t gp, uint64_t gs, Genode::Ram_dataspace_capability c)
+ : vmm_local(gp), size(gs), cap(c) { }
+
+ bool contains(Genode::uint64_t p)
+ {
+ return (vmm_local <= p) && (p < vmm_local + size);
+ }
+};
+
+static Genode::List<Region> regions;
+
+static Genode::Allocator & heap()
+{
+ static Genode::Heap heap(genode_env().ram(), genode_env().rm());
+ return heap;
+}
+
+
+static Sub_rm_connection &vm_memory(Genode::uint64_t vm_size = 0)
+{
+ /* memory used by the VM in any order as the VMM asks for allocations */
+ static Sub_rm_connection vm_memory(genode_env(), vm_size);
+
+ if (!vm_size)
+ return vm_memory;
+
+ using namespace Genode;
+
+ /* create iterator for aligned allocation and attachment of memory */
+ addr_t const vmm_local = vm_memory.local_addr(0);
+ Flexpage_iterator fli(vmm_local, vm_size, 0, ~0UL, 0);
+
+ /* start iteration */
+ Flexpage memory = fli.page();
+ while (memory.valid()) {
+ addr_t const memory_size = 1UL << memory.log2_order;
+ addr_t allocated = 0;
+
+ addr_t alloc_size = 128 * 1024 * 1024;
+ if (alloc_size > memory_size)
+ alloc_size = memory_size;
+
+ while (allocated < memory_size) {
+ Ram_dataspace_capability ds = genode_env().ram().alloc(alloc_size);
+
+ addr_t to = vm_memory.attach_executable(ds, memory.addr +
+ allocated - vmm_local,
+ alloc_size);
+ Assert(to == vm_memory.local_addr(memory.addr + allocated - vmm_local));
+ allocated += alloc_size;
+
+ regions.insert(new (heap()) Region(to, alloc_size, ds));
+
+ if (memory_size - allocated < alloc_size)
+ alloc_size = memory_size - allocated;
+ }
+
+ /* request next aligned memory range to be allocated and attached */
+ memory = fli.page();
+ }
+
+ /* reserve chunkids which are special or unused */
+ chunk_ids.reserve(0, CHUNKID_START);
+ addr_t const unused_id = CHUNKID_START + vm_size / GMM_CHUNK_SIZE;
+ addr_t const unused_count = MAX_CHUNK_IDS - unused_id - 1;
+ chunk_ids.reserve(unused_id, unused_count);
+
+ return vm_memory;
+}
+
+
+static Genode::Vm_connection &vm_connection(long prio = 0)
+{
+ static Genode::Vm_connection vm_session(genode_env(), "VBox vCPUs", prio);
+ return vm_session;
+}
+
+
+/*
+ * VCPU handling
+ */
+
+static Genode::List<Vcpu_handler> &vcpu_handler_list()
+{
+ static Genode::List<Vcpu_handler> _inst;
+ return _inst;
+}
+
+
+static Vcpu_handler *lookup_vcpu_handler(unsigned int cpu_id)
+{
+ for (Vcpu_handler *vcpu_handler = vcpu_handler_list().first();
+ vcpu_handler;
+ vcpu_handler = vcpu_handler->next())
+ if (vcpu_handler->cpu_id() == cpu_id)
+ return vcpu_handler;
+
+ return 0;
+}
+
+
+HRESULT genode_setup_machine(ComObjPtr<Machine> machine)
+{
+ ULONG memory_vbox;
+ HRESULT rc = machine->COMGETTER(MemorySize)(&memory_vbox);
+ if (FAILED(rc))
+ return rc;
+
+ /*
+ * Extra memory because of:
+ * - the first chunk ID (0) can't be used (VirtualBox does not accept chunk ID 0)
+ * - chunk IDs 1..2 are reserved for handy-page allocations
+ * - another chunk ID is additionally used for handy pages, but as a large page
+ */
+ size_t const vmm_memory = 1024ULL * 1024 * (memory_vbox + 16) +
+ (CHUNKID_START + 1) * GMM_CHUNK_SIZE;
+ HRESULT ret = genode_check_memory_config(machine, vmm_memory);
+ if (ret == VINF_SUCCESS)
+ vm_memory(vmm_memory);
+
+ return ret;
+};
+
+
+/* Genode specific function */
+
+Genode::Xml_node platform_rom()
+{
+ static Genode::Attached_rom_dataspace const platform(genode_env(),
+ "platform_info");
+ return platform.xml().sub_node("hardware");
+}
+
+
+void SUPR3QueryHWACCLonGenodeSupport(VM * pVM)
+{
+ try {
+ Genode::Xml_node const features = platform_rom().sub_node("features");
+ pVM->hm.s.svm.fSupported = features.attribute_value("svm", false);
+ pVM->hm.s.vmx.fSupported = features.attribute_value("vmx", false);
+
+ if (pVM->hm.s.svm.fSupported || pVM->hm.s.vmx.fSupported) {
+ Genode::log("Using ", pVM->hm.s.svm.fSupported ? "SVM" : "VMX",
+ " virtualization extension.");
+ return;
+ }
+ } catch (...) { /* if we get an exception, leave hardware support off */ }
+
+ Genode::warning("No virtualization hardware acceleration available");
+}
+
+
+/* VirtualBox SUPLib interface */
+int SUPR3QueryVTxSupported(void) { return VINF_SUCCESS; }
+
+
+int SUPR3CallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation, VMCPUID idCpu)
+{
+ switch (uOperation) {
+ case SUP_VMMR0_DO_HM_RUN:
+ Vcpu_handler *vcpu_handler = lookup_vcpu_handler(idCpu);
+ Assert(vcpu_handler);
+ return vcpu_handler->run_hw(pVMR0);
+ }
+ return VERR_INTERNAL_ERROR;
+}
+
+int SUPR3PageAllocEx(::size_t cPages, uint32_t fFlags, void **ppvPages,
+ PRTR0PTR pR0Ptr, PSUPPAGE paPages)
+{
+ Assert(ppvPages);
+ Assert(!fFlags);
+
+ using Genode::Attached_ram_dataspace;
+ Attached_ram_dataspace * ds = new (heap()) Attached_ram_dataspace(genode_env().ram(),
+ genode_env().rm(),
+ cPages * ONE_PAGE_SIZE);
+
+ Genode::addr_t const vmm_local = reinterpret_cast<Genode::addr_t>(ds->local_addr<void>());
+
+ regions.insert(new (heap()) Region(vmm_local, cPages * ONE_PAGE_SIZE, ds->cap()));
+
+ *ppvPages = ds->local_addr<void>();
+ if (pR0Ptr)
+ *pR0Ptr = vmm_local;
+
+ if (!paPages)
+ return VINF_SUCCESS;
+
+ for (unsigned iPage = 0; iPage < cPages; iPage++)
+ {
+ paPages[iPage].uReserved = 0;
+ paPages[iPage].Phys = vmm_local + iPage * ONE_PAGE_SIZE;
+ }
+
+ return VINF_SUCCESS;
+}
+
+enum { MAX_TRACKING = 4 };
+static struct {
+ Free_ids free;
+ unsigned freed;
+ unsigned chunkid;
+} track_free[MAX_TRACKING];
+
+static void partial_free_large_page(unsigned chunkid, unsigned page_id)
+{
+ unsigned pos = 0;
+
+ /* lookup if already exist */
+ for (; pos < MAX_TRACKING; pos++)
+ {
+ if (track_free[pos].chunkid == chunkid)
+ break;
+ }
+
+ /* if not exist find free place */
+ if (pos >= MAX_TRACKING) {
+ for (int i = 0; i < MAX_TRACKING; i++) {
+ if (track_free[i].chunkid)
+ continue;
+
+ track_free[i].chunkid = chunkid;
+ track_free[i].freed = 0;
+ pos = i;
+ break;
+ }
+
+ /* too many chunkids in use ? */
+ Assert (pos < MAX_TRACKING);
+ if (pos >= MAX_TRACKING)
+ return;
+ }
+
+ try {
+ /* mark as in use */
+ track_free[pos].free.set(page_id, 1);
+ track_free[pos].freed += 1;
+
+ if (track_free[pos].freed >= 512) {
+ /* slow ? optimize ? XXX */
+ for (unsigned i = 0; i < 512; i++) {
+ if (!track_free[pos].free.get(i, 1))
+ throw 1;
+ track_free[pos].free.clear(i, 1);
+ }
+
+ track_free[pos].chunkid = 0;
+ track_free[pos].freed = 0;
+
+ chunk_ids.free(chunkid);
+ }
+ } catch (...) {
+ Genode::error(__func__," ", __LINE__, " allocation failed ", pos, ":",
+ chunkid, ":", page_id);
+ throw;
+ }
+}
+
+int SUPR3CallVMMR0Ex(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation,
+ uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
+{
+
+ switch (uOperation) {
+
+ case VMMR0_DO_GVMM_CREATE_VM:
+ {
+ genode_VMMR0_DO_GVMM_CREATE_VM(pReqHdr);
+
+ GVMMCREATEVMREQ &req = reinterpret_cast<GVMMCREATEVMREQ &>(*pReqHdr);
+ SUPR3QueryHWACCLonGenodeSupport(reinterpret_cast<VM *>(req.pVMR3));
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GVMM_REGISTER_VMCPU:
+ genode_VMMR0_DO_GVMM_REGISTER_VMCPU(pVMR0, idCpu);
+ return VINF_SUCCESS;
+
+ case VMMR0_DO_GVMM_SCHED_HALT:
+ {
+ uint64_t const u64NowGip = RTTimeNanoTS();
+ uint64_t const ns_diff = u64Arg > u64NowGip ? u64Arg - u64NowGip : 0;
+
+ if (!ns_diff)
+ return VINF_SUCCESS;
+
+ using namespace Genode;
+
+ if (ns_diff > RT_NS_1SEC)
+ warning(" more than 1 sec vcpu halt ", ns_diff, " ns");
+
+ Vcpu_handler *vcpu_handler = lookup_vcpu_handler(idCpu);
+ Assert(vcpu_handler);
+ vcpu_handler->halt(u64NowGip + ns_diff);
+
+ return VINF_SUCCESS;
+ }
+
+ case VMMR0_DO_GVMM_SCHED_WAKE_UP:
+ {
+ Vcpu_handler *vcpu_handler = lookup_vcpu_handler(idCpu);
+ Assert(vcpu_handler);
+
+ vcpu_handler->wake_up();
+ return VINF_SUCCESS;
+ }
+
+ /* called by 'vmR3HaltGlobal1Halt' */
+ case VMMR0_DO_GVMM_SCHED_POLL:
+ return VINF_SUCCESS;
+
+ case VMMR0_DO_VMMR0_INIT:
+ return VINF_SUCCESS;
+
+ case VMMR0_DO_GVMM_DESTROY_VM:
+ case VMMR0_DO_VMMR0_TERM:
+ case VMMR0_DO_HM_SETUP_VM:
+ return VINF_SUCCESS;
+
+ case VMMR0_DO_HM_ENABLE:
+ return VINF_SUCCESS;
+
+ case VMMR0_DO_GVMM_SCHED_POKE:
+ {
+ PVM pVM = reinterpret_cast<PVM>(pVMR0);
+ Vcpu_handler *vcpu_handler = lookup_vcpu_handler(idCpu);
+ Assert(vcpu_handler);
+ if (vcpu_handler)
+ vcpu_handler->recall(pVM);
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GMM_ALLOCATE_PAGES:
+ {
+ /*
+ * The VMM is asking for some host-virtual memory pages without
+ * actually allocating the backing store. The backing-store allocation
+ * takes place via VMMR0_DO_GMM_MAP_UNMAP_CHUNK. The assignment of the
+ * guest addresses is already known at this point.
+ */
+
+ static_assert(PGM_HANDY_PAGES * ONE_PAGE_SIZE == GMM_CHUNK_SIZE,
+ "Don't do that - you're going to waste tons of memory");
+ Assert(pReqHdr->u32Magic == SUPVMMR0REQHDR_MAGIC);
+
+ /* XXX VMM/VMMR0/GMMR0.cpp check there XXX */
+
+ PGMMALLOCATEPAGESREQ req = reinterpret_cast<PGMMALLOCATEPAGESREQ>(pReqHdr);
+
+ for (unsigned i = 0; i < req->cPages; i++) {
+ RTHCPHYS guest_addr = req->aPages[i].HCPhysGCPhys;
+ unsigned long page_idx = 0;
+ unsigned long chunk_id = 0;
+
+ try {
+ page_idx = page_ids.alloc();
+ chunk_id = CHUNKID_PAGE_START + page_idx / PAGES_SUPERPAGE;
+ } catch (...) {
+ Genode::error(__func__," ", __LINE__, " allocation failed");
+ throw;
+ }
+
+ Assert (page_idx <= GMM_PAGEID_IDX_MASK);
+
+ req->aPages[i].idPage = (chunk_id << GMM_CHUNKID_SHIFT) | page_idx;
+ req->aPages[i].HCPhysGCPhys = vm_memory().local_addr((chunk_id * GMM_CHUNK_SIZE) | (page_idx * ONE_PAGE_SIZE));
+ Assert(vm_memory().contains(req->aPages[i].HCPhysGCPhys));
+
+ #if 0
+ Genode::log("cPages ", Genode::Hex(req->cPages), " "
+ "chunkID=", req->aPages[i].idPage >> GMM_CHUNKID_SHIFT, " "
+ "pageIDX=", req->aPages[i].idPage & GMM_PAGEID_IDX_MASK, " "
+ "idPage=", Genode::Hex(req->aPages[i].idPage), " "
+ "GCPhys=", Genode::Hex(guest_addr), " "
+ "HCPhys=", Genode::Hex(req->aPages[i].HCPhysGCPhys), " "
+ "(", Genode::Hex(chunk_id * GMM_CHUNK_SIZE), " "
+ "| ", Genode::Hex(page_idx * ONE_PAGE_SIZE), ") pageidx=", page_idx, " "
+ "start_vm=", vm_memory().local_addr(0));
+ #endif
+
+ }
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
+ {
+ PGMMMAPUNMAPCHUNKREQ req = reinterpret_cast<PGMMMAPUNMAPCHUNKREQ>(pReqHdr);
+
+ Assert(pReqHdr->u32Magic == SUPVMMR0REQHDR_MAGIC);
+ Assert(req->idChunkUnmap == NIL_GMM_CHUNKID);
+ Assert(req->idChunkMap != NIL_GMM_CHUNKID);
+
+ Genode::addr_t local_addr_offset = (uintptr_t)req->idChunkMap << GMM_CHUNK_SHIFT;
+ Genode::addr_t to = vm_memory().local_addr(local_addr_offset);
+
+ req->pvR3 = reinterpret_cast<RTR3PTR>(to);
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GMM_QUERY_MEM_STATS:
+ {
+ PGMMMEMSTATSREQ req = reinterpret_cast<PGMMMEMSTATSREQ>(pReqHdr);
+ req->cAllocPages = 0;
+ req->cMaxPages = 0;
+ req->cBalloonedPages = 0;
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
+ {
+ /*
+ * The VMM is asking for some host-virtual memory pages without
+ * actually allocating the backing store. The backing-store allocation
+ * takes place via VMMR0_DO_GMM_MAP_UNMAP_CHUNK. The assignment of
+ * guest addresses to these host pages is not yet known at this point.
+ */
+
+ PVM pVM = reinterpret_cast<PVM>(pVMR0);
+
+ /* based on PGMR0PhysAllocateHandyPages() in VMM/VMMR0/PGMR0.cpp - start */
+ uint32_t iFirst = pVM->pgm.s.cHandyPages;
+ uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
+ uint32_t cPagesToUpdate = cPages;
+ uint32_t cPagesToAlloc = cPages;
+ /* based on PGMR0PhysAllocateHandyPages() in VMM/VMMR0/PGMR0.cpp - end */
+
+ /* based on GMMR0AllocateHandyPages in VMM/VMMR0/GMMR0.cpp - start */
+ unsigned iPage = 0;
+ for (; iPage < cPagesToUpdate; iPage++)
+ {
+ AssertMsgReturn( ( pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
+ && !(pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
+ || pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys == NIL_RTHCPHYS
+ || pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE,
+ ("#%#x: %RHp\n", iFirst + iPage, pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn( pVM->pgm.s.aHandyPages[iFirst + iPage].idPage <= GMM_PAGEID_LAST
+ /*|| pVM->pgm.s.aHandyPages[iFirst + iPage].idPage == NIL_GMM_PAGEID*/,
+ ("#%#x: %#x\n", iFirst + iPage, pVM->pgm.s.aHandyPages[iFirst + iPage].idPage), VERR_INVALID_PARAMETER);
+ AssertMsgReturn( pVM->pgm.s.aHandyPages[iFirst + iPage].idPage <= GMM_PAGEID_LAST
+ /*|| pVM->pgm.s.aHandyPages[iFirst + iPage].idSharedPage == NIL_GMM_PAGEID*/,
+ ("#%#x: %#x\n", iFirst + iPage, pVM->pgm.s.aHandyPages[iFirst + iPage].idSharedPage), VERR_INVALID_PARAMETER);
+ }
+
+ for (; iPage < cPagesToAlloc; iPage++)
+ {
+ AssertMsgReturn(pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iFirst + iPage, pVM->pgm.s.aHandyPages[iFirst + iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pVM->pgm.s.aHandyPages[iFirst + iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iFirst + iPage, pVM->pgm.s.aHandyPages[iFirst + iPage].idPage), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pVM->pgm.s.aHandyPages[iFirst + iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iFirst + iPage, pVM->pgm.s.aHandyPages[iFirst + iPage].idSharedPage), VERR_INVALID_PARAMETER);
+ }
+
+ Genode::uint64_t chunkid = 0;
+
+ try {
+ chunkid = chunk_ids.alloc();
+ } catch (...) {
+ Genode::error(__func__," ", __LINE__, " allocation failed");
+ throw;
+ }
+
+ if (cPagesToAlloc != GMM_CHUNK_SIZE / ONE_PAGE_SIZE)
+ Genode::log("special chunkid=", chunkid, " "
+ "toupdate=", cPagesToUpdate, " "
+ "toalloc=", cPagesToAlloc, " "
+ "virt=", Genode::Hex(vm_memory().local_addr(chunkid << GMM_CHUNK_SHIFT)));
+
+ for (unsigned i = 0; i < cPagesToUpdate; i++) {
+ if (pVM->pgm.s.aHandyPages[iFirst + i].idPage != NIL_GMM_PAGEID)
+ {
+ pVM->pgm.s.aHandyPages[iFirst + i].idPage = NIL_GMM_PAGEID;
+ pVM->pgm.s.aHandyPages[iFirst + i].HCPhysGCPhys = NIL_RTHCPHYS;
+ }
+
+ if (pVM->pgm.s.aHandyPages[iFirst + i].idSharedPage != NIL_GMM_PAGEID)
+ AssertMsgReturn(false, ("%s %u - not implemented", __func__, __LINE__), VERR_GENERAL_FAILURE);
+ }
+
+ for (unsigned i = 0; i < cPagesToAlloc; i++)
+ {
+ Assert(pVM->pgm.s.aHandyPages[iFirst + i].HCPhysGCPhys == NIL_RTHCPHYS);
+ Assert(pVM->pgm.s.aHandyPages[iFirst + i].idPage == NIL_GMM_PAGEID);
+ Assert(pVM->pgm.s.aHandyPages[iFirst + i].idSharedPage == NIL_GMM_PAGEID);
+ }
+
+ for (unsigned i = 0; i < cPagesToUpdate; i++) {
+ unsigned reverse = i; //cPagesToUpdate - 1 - i;
+ Assert (pVM->pgm.s.aHandyPages[iFirst + i].HCPhysGCPhys == NIL_RTHCPHYS);
+ {
+ pVM->pgm.s.aHandyPages[iFirst + i].idPage = (chunkid << GMM_CHUNKID_SHIFT) | (iFirst + reverse);
+ pVM->pgm.s.aHandyPages[iFirst + i].idSharedPage = NIL_GMM_PAGEID;
+
+ pVM->pgm.s.aHandyPages[iFirst + i].HCPhysGCPhys = vm_memory().local_addr((chunkid << GMM_CHUNK_SHIFT) | ((iFirst + reverse) * ONE_PAGE_SIZE));
+ }
+ }
+ /* based on GMMR0AllocateHandyPages in VMM/VMMR0/GMMR0.cpp - end */
+
+ /* based on PGMR0PhysAllocateHandyPages() in VMM/VMMR0/PGMR0.cpp - start */
+ pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
+
+ for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
+ {
+ Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
+ Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
+ Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
+ Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
+ Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
+ }
+ /* based on PGMR0PhysAllocateHandyPages() in VMM/VMMR0/PGMR0.cpp - end */
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
+ {
+ /*
+ * The VMM is asking for large host-virtual memory pages without
+ * actually allocating the backing store. The backing-store allocation
+ * takes place via VMMR0_DO_GMM_MAP_UNMAP_CHUNK. The assignment of
+ * guest addresses to these host pages is not yet known at this point.
+ */
+
+ PVM pVM = reinterpret_cast<PVM>(pVMR0);
+
+ Assert(pVM);
+ Assert(pVM->pgm.s.cLargeHandyPages == 0);
+
+ try {
+ Genode::uint64_t chunkid = chunk_ids.alloc();
+
+ pVM->pgm.s.aLargeHandyPage[0].idPage = (chunkid << GMM_CHUNKID_SHIFT);
+ pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys = vm_memory().local_addr(chunkid << GMM_CHUNK_SHIFT);
+
+ pVM->pgm.s.cLargeHandyPages = 1;
+ } catch (...) {
+ Genode::error(__func__," ", __LINE__, " allocation failed");
+ throw;
+ }
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GMM_BALLOONED_PAGES:
+ case VMMR0_DO_GMM_RESET_SHARED_MODULES:
+ case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
+ {
+ PVM const pVM = reinterpret_cast<PVM>(pVMR0);
+ PVMCPU const pVCpu = &pVM->aCpus[idCpu];
+
+ /* if not in VM shutdown, complain - likely a bug */
+ if (pVCpu->em.s.enmState != EMSTATE_TERMINATING)
+ Genode::error("unexpected call of type ", uOperation, ", "
+ "em state=", (int)pVCpu->em.s.enmState);
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GMM_FREE_PAGES:
+ {
+ if (u64Arg)
+ return VERR_INVALID_PARAMETER;
+
+ PVM pVM = reinterpret_cast<PVM>(pVMR0);
+ PGMMFREEPAGESREQ pReq = reinterpret_cast<PGMMFREEPAGESREQ>(pReqHdr);
+
+ AssertPtrReturn(pVM, VERR_INVALID_POINTER);
+ AssertPtrReturn(pReq, VERR_INVALID_POINTER);
+ AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0]),
+ ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0])),
+ VERR_INVALID_PARAMETER);
+ AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]),
+ ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages])),
+ VERR_INVALID_PARAMETER);
+
+ uint32_t cPages = pReq->cPages;
+ PGMMFREEPAGEDESC paPages = &pReq->aPages[0];
+ GMMACCOUNT enmAccount = pReq->enmAccount;
+
+ AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
+ AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
+ AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
+
+ for (unsigned iPage = 0; iPage < cPages; iPage++)
+ AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
+ /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
+ ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
+
+ for (uint32_t last_chunk = ~0U, iPage = 0; iPage < cPages; iPage++)
+ {
+ uint32_t const idPage = paPages[iPage].idPage;
+ uint32_t const page_idx = idPage & GMM_PAGEID_IDX_MASK;
+ uint32_t const chunkid = idPage >> GMM_CHUNKID_SHIFT;
+
+ if (last_chunk != chunkid) {
+ /* revoke mapping from guest VM */
+ PGMUnmapMemoryGenode(nullptr, (0UL + chunkid) << GMM_CHUNK_SHIFT, GMM_CHUNK_SIZE);
+
+ last_chunk = chunkid;
+ }
+
+ if (CHUNKID_PAGE_START <= chunkid && chunkid <= CHUNKID_PAGE_END) {
+ try {
+ page_ids.free((chunkid - CHUNKID_PAGE_START) * PAGES_SUPERPAGE + page_idx);
+ } catch (...) {
+ Genode::error(__func__," ", __LINE__, " clearing failed");
+ throw;
+ }
+ }
+
+ partial_free_large_page(chunkid, page_idx);
+
+ paPages[iPage].idPage = NIL_GMM_PAGEID;
+ }
+
+ return VINF_SUCCESS;
+ }
+ case VMMR0_DO_GMM_INITIAL_RESERVATION:
+ return VINF_SUCCESS;
+ case VMMR0_DO_GMM_UPDATE_RESERVATION:
+ return VINF_SUCCESS;
+ default:
+ Genode::error("SUPR3CallVMMR0Ex: unhandled uOperation ", uOperation,
+ " ", (int)VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, " ",
+ (int)VMMR0_DO_GMM_QUERY_MEM_STATS);
+ return VERR_GENERAL_FAILURE;
+ }
+}
+
+
+/**
+ * Various support stuff.
+ */
+uint64_t genode_cpu_hz()
+{
+ static uint64_t cpu_freq = 0;
+
+ if (!cpu_freq) {
+ try {
+ platform_rom().sub_node("tsc").attribute("freq_khz").value(&cpu_freq);
+ cpu_freq *= 1000ULL;
+ } catch (...) {
+ Genode::error("could not read out CPU frequency");
+ Genode::Lock lock;
+ lock.lock();
+ }
+ }
+
+ return cpu_freq;
+}
+
+
+void PGMUnmapMemoryGenode(void *, RTGCPHYS guest_phys, ::size_t size)
+{
+ vm_connection().detach(guest_phys, size);
+}
+
+extern "C" void PGMFlushVMMemory()
+{
+ /* XXX PGMUnmapMemoryGenode on vm_session does not flush all caps */
+ PGMUnmapMemoryGenode(nullptr, 0, MAX_VM_MEMORY);
+}
+
+
+extern "C" int sched_yield(void)
+{
+ static unsigned long counter = 0;
+
+ if (++counter % 50000 == 0)
+ Genode::warning(__func__, " called ", counter, " times");
+
+ return -1;
+}
+
+
+bool create_emt_vcpu(pthread_t * thread, ::size_t stack_size,
+ void *(*start_routine)(void *), void *arg,
+ Genode::Cpu_session * cpu_session,
+ Genode::Affinity::Location location,
+ unsigned int cpu_id, const char * name, long prio)
+{
+ Genode::Xml_node const features = platform_rom().sub_node("features");
+ bool const svm = features.attribute_value("svm", false);
+ bool const vmx = features.attribute_value("vmx", false);
+
+ if (!svm && !vmx)
+ return false;
+
+ Vcpu_handler *vcpu_handler = 0;
+
+ if (vmx)
+ vcpu_handler = new (heap()) Vcpu_handler_vmx(genode_env(),
+ stack_size,
+ location,
+ cpu_id,
+ vm_connection(prio),
+ heap());
+
+ if (svm)
+ vcpu_handler = new (heap()) Vcpu_handler_svm(genode_env(),
+ stack_size,
+ location,
+ cpu_id,
+ vm_connection(prio),
+ heap());
+
+ vcpu_handler_list().insert(vcpu_handler);
+
+ Libc::pthread_create(thread, start_routine, arg,
+ stack_size, name, cpu_session, location);
+
+ return true;
+}
+
+static int _map_memory(Genode::Vm_connection &vm_session,
+ RTGCPHYS const GCPhys,
+ RTHCPHYS const vmm_local,
+ size_t const mapping_size,
+ bool writeable)
+{
+ for (Region *region = regions.first(); region; region = region->next())
+ {
+ if (!region->contains(vmm_local))
+ continue;
+
+ bool retry = false;
+
+ do {
+ Genode::addr_t const offset = vmm_local - region->vmm_local;
+
+ try {
+ vm_session.with_upgrade([&]() {
+ vm_session.attach(region->cap, GCPhys,
+ { .offset = offset,
+ .size = mapping_size,
+ .executable = true,
+ .writeable = writeable });
+ });
+ } catch (Genode::Vm_session::Region_conflict) {
+ /* XXX PGMUnmapMemoryGenode on vm_session does not flush caps */
+ vm_session.detach(GCPhys, mapping_size);
+
+ if (retry) {
+ Genode::error("region conflict - ", Genode::Hex(GCPhys),
+ " ", Genode::Hex(mapping_size), " vmm_local=",
+ Genode::Hex(vmm_local), " ", region->cap,
+ " region=", Genode::Hex(region->vmm_local),
+ "+", Genode::Hex(region->size));
+
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+
+ if (!retry) {
+ retry = true;
+ continue;
+ }
+ }
+ retry = false;
+ } while (retry);
+
+ return VINF_SUCCESS;
+ }
+ Genode::error(" no mapping ?");
+ return VERR_PGM_DYNMAP_FAILED;
+}
+
+class Pgm_guard
+{
+ private:
+ VM &_vm;
+
+ public:
+ Pgm_guard(VM &vm) : _vm(vm) { pgmLock(&_vm); }
+ ~Pgm_guard() { pgmUnlock(&_vm); }
+};
+
+#include "PGMInline.h"
+
+int Vcpu_handler::map_memory(Genode::Vm_connection &vm_session,
+ RTGCPHYS const GCPhys, RTGCUINT vbox_fault_reason)
+{
+ Pgm_guard guard(*_vm);
+
+ _ept_fault_addr_type = PGMPAGETYPE_INVALID;
+
+ PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(_vm, GCPhys);
+ if (!pRam)
+ return VERR_PGM_DYNMAP_FAILED;
+
+ RTGCPHYS off = GCPhys - pRam->GCPhys;
+ if (off >= pRam->cb)
+ return VERR_PGM_DYNMAP_FAILED;
+
+ unsigned iPage = off >> PAGE_SHIFT;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+
+ _ept_fault_addr_type = PGM_PAGE_GET_TYPE(pPage);
+
+ /*
+ * If the page is not allocated (== zero page) and is neither MMIO nor an
+ * active page, allocate and map it immediately. Importantly, do not do this
+ * if the A20 gate is disabled - in that case the A20 gate is handled by
+ * IEM/REM.
+ */
+ if (PGM_PAGE_IS_ZERO(pPage)
+ && !PGM_PAGE_IS_ALLOCATED(pPage)
+ && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
+ && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)
+ && PGM_A20_IS_ENABLED(_vcpu))
+ {
+ pgmPhysPageMakeWritable(_vm, pPage, GCPhys);
+ }
+
+ if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) ||
+ PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage) ||
+ PGM_PAGE_IS_ZERO(pPage)) {
+
+ if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO &&
+ !PGM_PAGE_IS_ZERO(pPage)) {
+
+ Genode::log(__LINE__, " GCPhys=", Genode::Hex(GCPhys), " ",
+ PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage), " ",
+ PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage), " ",
+ PGM_PAGE_IS_ZERO(pPage), " "
+ " vbox_fault_reason=", Genode::Hex(vbox_fault_reason));
+ Genode::log(__LINE__, " GCPhys=", Genode::Hex(GCPhys), " "
+ "host=", Genode::Hex(PGM_PAGE_GET_HCPHYS(pPage)), " "
+ "type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)), " "
+ "writeable=true "
+ "state=", Genode::Hex(PGM_PAGE_GET_STATE(pPage)));
+ }
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+
+ if (!PGM_PAGE_IS_ALLOCATED(pPage))
+ Genode::log("unknown page state ", Genode::Hex(PGM_PAGE_GET_STATE(pPage)),
+ " GCPhys=", Genode::Hex(GCPhys));
+ Assert(PGM_PAGE_IS_ALLOCATED(pPage));
+
+ if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM &&
+ PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO2 &&
+ PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM)
+ {
+ if (VERBOSE_PGM)
+ Genode::log(__LINE__, " GCPhys=", Genode::Hex(GCPhys), " "
+ "vbox_fault_reason=", Genode::Hex(vbox_fault_reason), " "
+ "host=", Genode::Hex(PGM_PAGE_GET_HCPHYS(pPage)), " "
+ "type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)), " "
+ "state=", Genode::Hex(PGM_PAGE_GET_STATE(pPage)));
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+
+ Assert(!PGM_PAGE_IS_ZERO(pPage));
+
+ /* write fault on a ROM region */
+ if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM &&
+ vbox_fault_reason & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE) {
+ Genode::warning(__func__, " - write fault on ROM region!? gp=",
+ Genode::Hex(GCPhys));
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+
+ /* nothing should be mapped - otherwise we get endless overmap loops */
+ Assert(!(vbox_fault_reason & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT));
+
+ bool const writeable = PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM;
+
+ PPGMPHYSHANDLER handler = pgmHandlerPhysicalLookup(_vm, GCPhys);
+
+ if (VERBOSE_PGM && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2 &&
+ !handler)
+ Genode::log(__LINE__, " GCPhys=", Genode::Hex(GCPhys), " ",
+ "type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)), " "
+ "state=", Genode::Hex(PGM_PAGE_GET_STATE(pPage)), " "
+ "- MMIO2 w/o handler");
+
+ if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2 && handler) {
+ PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(_vm, handler)->CTX_SUFF(pfnHandler);
+ if (!pfnHandler) {
+ Genode::log(__LINE__, " GCPhys=", Genode::Hex(GCPhys), " "
+ "type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)));
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+ void *pvUser = handler->CTX_SUFF(pvUser);
+ if (!pvUser) {
+ Genode::log(__LINE__, " GCPhys=", Genode::Hex(GCPhys), " "
+ "type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)));
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+
+ PGMACCESSTYPE access_type = (vbox_fault_reason & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE) ? PGMACCESSTYPE_WRITE : PGMACCESSTYPE_READ;
+
+ VBOXSTRICTRC rcStrict = pfnHandler(_vm, _vcpu, GCPhys, nullptr, nullptr, 0, access_type, PGMACCESSORIGIN_HM, pvUser);
+ if (rcStrict != VINF_PGM_HANDLER_DO_DEFAULT) {
+ Genode::log(__LINE__, " nodefault GCPhys=", Genode::Hex(GCPhys), " "
+ "type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)), " "
+ "pfnHandler=", pfnHandler);
+ return VERR_PGM_DYNMAP_FAILED;
+ }
+ }
+
+/*
+ if (VERBOSE_PGM)
+ Genode::log(Genode::Hex(PGM_PAGE_GET_HCPHYS(pPage)),
+ "->", Genode::Hex(GCPhys),
+ " type=", PGM_PAGE_GET_TYPE(pPage),
+ " state=", PGM_PAGE_GET_STATE(pPage),
+ " pde_type=", PGM_PAGE_GET_PDE_TYPE(pPage),
+ PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE ? "(is pde)" : "(not pde)",
+ " iPage=", iPage,
+ " range_start=", Genode::Hex(pRam->GCPhys),
+ " range_size=", Genode::Hex(pRam->cb),
+ " pages=", pRam->cb >> PAGE_SHIFT
+ );
+*/
+
+ if (PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE)
+ return _map_memory(vm_session, GCPhys, PGM_PAGE_GET_HCPHYS(pPage), 4096, writeable); /* one page mapping */
+
+ Genode::addr_t const superpage_log2 = 21;
+ Genode::addr_t const max_pages = pRam->cb >> PAGE_SHIFT;
+ Genode::addr_t const superpage_pages = (1UL << superpage_log2) / 4096;
+ Genode::addr_t const mask = (1UL << superpage_log2) - 1;
+ Genode::addr_t const super_gcphys = GCPhys & ~mask;
+
+ RTGCPHYS max_off = super_gcphys - pRam->GCPhys;
+ if (max_off > pRam->cb)
+ return _map_memory(vm_session, GCPhys, PGM_PAGE_GET_HCPHYS(pPage), 4096, writeable); /* one page mapping */
+
+ Genode::addr_t const super_hcphys = PGM_PAGE_GET_HCPHYS(pPage) & ~mask;
+
+ unsigned const i_s = max_off >> PAGE_SHIFT;
+
+ if (i_s + superpage_pages > max_pages)
+ return _map_memory(vm_session, GCPhys, PGM_PAGE_GET_HCPHYS(pPage), 4096, writeable); /* one page mapping */
+
+ if (VERBOSE_PGM)
+ Genode::log(Genode::Hex(PGM_PAGE_GET_HCPHYS(pPage)), "->",
+ Genode::Hex(GCPhys), " - iPage ", iPage, " [",
+ i_s, ",", i_s + superpage_pages, ")", " "
+ "range_size=", Genode::Hex(pRam->cb));
+
+ /* paranoia sanity checks */
+ for (Genode::addr_t i = i_s; i < i_s + superpage_pages; i++) {
+ PPGMPAGE page = &pRam->aPages[i];
+
+ Genode::addr_t const gcpage = pRam->GCPhys + (i << PAGE_SHIFT);
+
+ if (!(super_hcphys == (PGM_PAGE_GET_HCPHYS(page) & ~mask)) ||
+ !(super_gcphys == (gcpage & ~mask)) ||
+ !(PGM_PAGE_GET_PDE_TYPE(page) == PGM_PAGE_PDE_TYPE_PDE) ||
+ !(PGM_PAGE_GET_TYPE(page) == PGM_PAGE_GET_TYPE(pPage)) ||
+ !(PGM_PAGE_GET_STATE(page) == PGM_PAGE_GET_STATE(pPage)))
+ {
+ if (VERBOSE_PGM)
+ Genode::error(Genode::Hex(PGM_PAGE_GET_HCPHYS(pPage)), "->",
+ Genode::Hex(GCPhys), " - iPage ", iPage, " i ", i, " [",
+ i_s, ",", i_s + superpage_pages, ")", " "
+ "range_size=", Genode::Hex(pRam->cb), " "
+ "super_hcphys=", Genode::Hex(super_hcphys), "?=", Genode::Hex((PGM_PAGE_GET_HCPHYS(page) & ~mask)), " "
+ "super_gcphys=", Genode::Hex(super_gcphys), "?=", Genode::Hex((gcpage & ~mask)), " ",
+ (int)(PGM_PAGE_GET_PDE_TYPE(page)), "?=", (int)PGM_PAGE_PDE_TYPE_PDE, " ",
+ (int)(PGM_PAGE_GET_TYPE(page)), "?=", (int)PGM_PAGE_GET_TYPE(pPage), " ",
+ (int)(PGM_PAGE_GET_STATE(page)), "?=", (int)PGM_PAGE_GET_STATE(pPage));
+ return _map_memory(vm_session, GCPhys, PGM_PAGE_GET_HCPHYS(pPage), 4096, writeable); /* one page mapping */
+ }
+ }
+
+ /* XXX revoke of old mappings required ? */
+ /* super page mapping */
+ return _map_memory(vm_session, super_gcphys, super_hcphys, 1UL << superpage_log2, writeable);
+}
+
+
+Genode::uint64_t * Vcpu_handler::pdpte_map(VM *pVM, RTGCPHYS cr3)
+{
+ Pgm_guard guard(*_vm);
+
+ PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, cr3);
+ Assert (pRam);
+
+ RTGCPHYS off = cr3 - pRam->GCPhys;
+ Assert (off < pRam->cb);
+
+ unsigned iPage = off >> PAGE_SHIFT;
+ PPGMPAGE pPage = &pRam->aPages[iPage];
+
+/*
+ if (VERBOSE_PGM)
+ Genode::log(__LINE__, " gcphys=", Genode::Hex(cr3),
+ " host=", Genode::Hex(PGM_PAGE_GET_HCPHYS(pPage)),
+ " type=", Genode::Hex(PGM_PAGE_GET_TYPE(pPage)),
+ " state=",Genode::Hex(PGM_PAGE_GET_STATE(pPage)));
+*/
+
+ Genode::uint64_t *pdpte = reinterpret_cast<Genode::uint64_t *>(PGM_PAGE_GET_HCPHYS(pPage) + (cr3 & PAGE_OFFSET_MASK));
+
+ Assert(pdpte != 0);
+
+ return pdpte;
+}
+
+static PFNRTTIMER rttimer_func = nullptr;
+static void * rttimer_obj = nullptr;
+
+enum {
+ UPDATE_HZ = 1000,
+ UPDATE_US = 1000 * 1000 / UPDATE_HZ,
+ UPDATE_NS = UPDATE_US * 1000,
+};
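+
+/*
+ * With UPDATE_HZ = 1000 the GIP is refreshed every millisecond:
+ * UPDATE_US = 1000 us and UPDATE_NS = 1000000 ns. These values feed
+ * the u32UpdateHz and u32UpdateIntervalNS fields of the GIP below.
+ */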
+
+
+PSUPGLOBALINFOPAGE g_pSUPGlobalInfoPage;
+
+
+class Periodic_gip
+{
+ private :
+
+ void update()
+ {
+ /**
+ * We're using rdtsc here since timer_session->elapsed_ms produces
+ * unstable results when the timer service is using the Genode PIC
+ * driver.
+ */
+
+ Genode::uint64_t tsc_current = Genode::Trace::timestamp();
+
+ /*
+ * Convert tsc to nanoseconds.
+ *
+ * There is no 'uint128_t' type on x86_32, so we use the 128-bit type
+ * and functions provided by VirtualBox.
+ *
+ * nanots128 = tsc_current * 1000*1000*1000 / genode_cpu_hz()
+ *
+ */
+
+ RTUINT128U nanots128;
+ RTUInt128AssignU64(&nanots128, tsc_current);
+
+ RTUINT128U multiplier;
+ RTUInt128AssignU32(&multiplier, 1000*1000*1000);
+ RTUInt128AssignMul(&nanots128, &multiplier);
+
+ RTUINT128U divisor;
+ RTUInt128AssignU64(&divisor, genode_cpu_hz());
+ RTUInt128AssignDiv(&nanots128, &divisor);
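+
+ /*
+ * Example: on a 2-GHz host (genode_cpu_hz() = 2000000000), a TSC
+ * value of 3000000000 converts to 1500000000 ns, i.e. 1.5 s.
+ */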
+
+ SUPGIPCPU *cpu = &g_pSUPGlobalInfoPage->aCPUs[0];
+
+ /*
+ * Transaction id must be incremented before and after update,
+ * read struct SUPGIPCPU description for more details.
+ */
+ ASMAtomicIncU32(&cpu->u32TransactionId);
+
+ cpu->u64TSC = tsc_current;
+ cpu->u64NanoTS = nanots128.s.Lo;
+
+ /*
+ * Transaction id must be incremented before and after update,
+ * read struct SUPGIPCPU description for more details.
+ */
+ ASMAtomicIncU32(&cpu->u32TransactionId);
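+
+ /*
+ * The paired increments act like a seqlock: a GIP reader can detect
+ * an in-progress or concurrent update by checking that
+ * u32TransactionId is even and unchanged across its read of
+ * u64TSC and u64NanoTS.
+ */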
+
+ /* call the timer function of the RTTimerCreate call */
+ if (rttimer_func)
+ rttimer_func(nullptr, rttimer_obj, 0);
+
+ for (Vcpu_handler *vcpu_handler = vcpu_handler_list().first();
+ vcpu_handler;
+ vcpu_handler = vcpu_handler->next())
+ {
+ vcpu_handler->check_time();
+ }
+ }
+
+ public:
+
+ Timer::Connection _timer;
+ Genode::Signal_handler<Periodic_gip> _timer_handler;
+
+ Periodic_gip(Genode::Env &env)
+ :
+ _timer(env),
+ _timer_handler(env.ep(), *this, &Periodic_gip::update)
+ {
+ _timer.sigh(_timer_handler);
+ _timer.trigger_periodic(UPDATE_US);
+ }
+};
+
+
+struct Attached_gip : Genode::Attached_ram_dataspace
+{
+ Attached_gip()
+ : Attached_ram_dataspace(genode_env().ram(), genode_env().rm(), PAGE_SIZE)
+ {
+ g_pSUPGlobalInfoPage = local_addr<SUPGLOBALINFOPAGE>();
+
+ /* checked by TMR3Init */
+ g_pSUPGlobalInfoPage->u32Version = SUPGLOBALINFOPAGE_VERSION;
+ g_pSUPGlobalInfoPage->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
+ g_pSUPGlobalInfoPage->u32Mode = SUPGIPMODE_SYNC_TSC;
+ g_pSUPGlobalInfoPage->cCpus = 1;
+ g_pSUPGlobalInfoPage->cPages = 1;
+ g_pSUPGlobalInfoPage->u32UpdateHz = UPDATE_HZ;
+ g_pSUPGlobalInfoPage->u32UpdateIntervalNS = UPDATE_NS;
+ g_pSUPGlobalInfoPage->cOnlineCpus = 0;
+ g_pSUPGlobalInfoPage->cPresentCpus = 0;
+ g_pSUPGlobalInfoPage->cPossibleCpus = 0;
+ g_pSUPGlobalInfoPage->idCpuMax = 0;
+ g_pSUPGlobalInfoPage->u64CpuHz = genode_cpu_hz();
+ /* evaluated by rtTimeNanoTSInternalRediscover in Runtime/common/time/timesup.cpp */
+ g_pSUPGlobalInfoPage->fGetGipCpu = SUPGIPGETCPU_APIC_ID;
+
+ SUPGIPCPU *cpu = &g_pSUPGlobalInfoPage->aCPUs[0];
+
+ cpu->u32TransactionId = 0;
+ cpu->u32UpdateIntervalTSC = genode_cpu_hz() / UPDATE_HZ;
+ cpu->u64NanoTS = 0ULL;
+ cpu->u64TSC = 0ULL;
+ cpu->u64CpuHz = genode_cpu_hz();
+ cpu->cErrors = 0;
+ cpu->iTSCHistoryHead = 0;
+ cpu->u32PrevUpdateIntervalNS = UPDATE_NS;
+ cpu->enmState = SUPGIPCPUSTATE_ONLINE;
+ cpu->idCpu = 0;
+ cpu->iCpuSet = 0;
+ cpu->idApic = 0;
+
+ /* schedule periodic call of GIP update function */
+ static Periodic_gip periodic_gip(genode_env());
+ }
+};
+
+
+int RTTimerCreate(PRTTIMER *pptimer, unsigned ms, PFNRTTIMER func, void *obj)
+{
+ if (pptimer)
+ *pptimer = NULL;
+
+ /* used solely at one place in TM.cpp */
+ Assert(!rttimer_func);
+
+ /*
+ * Ignore the requested interval (10 ms), which is too coarse for
+ * audio. The callback handler will run at UPDATE_HZ rate instead.
+ */
+ rttimer_func = func;
+ rttimer_obj = obj;
+
+ return VINF_SUCCESS;
+}
+
+
+int RTTimerDestroy(PRTTIMER)
+{
+ rttimer_obj = nullptr;
+ rttimer_func = nullptr;
+ return VINF_SUCCESS;
+}
+
+
+int SUPR3Init(PSUPDRVSESSION *ppSession)
+{
+ static Attached_gip gip;
+
+ return VINF_SUCCESS;
+}
+
+int SUPR3GipGetPhys(PRTHCPHYS pHCPhys)
+{
+ /*
+ * Return VMM-local address as physical address. This address is
+ * then fed to MMR3HyperMapHCPhys. (TMR3Init)
+ */
+ *pHCPhys = (RTHCPHYS)g_pSUPGlobalInfoPage;
+
+ return VINF_SUCCESS;
+}
diff --git a/repos/ports/src/virtualbox5/patches/iem_wip.patch b/repos/ports/src/virtualbox5/patches/iem_wip.patch
index 2fa84d133..469f4c77c 100644
--- a/repos/ports/src/virtualbox5/patches/iem_wip.patch
+++ b/repos/ports/src/virtualbox5/patches/iem_wip.patch
@@ -51,7 +51,7 @@
* @param pRam The RAM range.
*/
+#include
-+extern "C" bool PGMUnmapMemoryGenode(void *, size_t size);
++extern "C" bool PGMUnmapMemoryGenode(void *, RTGCPHYS, size_t size);
+
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
@@ -64,7 +64,7 @@
+ if (pCur->cPages != pRam->cb / 4096)
+ Genode::warning("dubious ? phys=", Genode::Hex(pRam->GCPhys));
+
-+ PGMUnmapMemoryGenode(pRam->pvR3, pRam->cb);
++ PGMUnmapMemoryGenode(pRam->pvR3, pRam->GCPhys, pRam->cb);
+ }
+
if (fFlushTLBs)
diff --git a/repos/ports/src/virtualbox5/pgm.cc b/repos/ports/src/virtualbox5/pgm.cc
index 83ff12228..241a4010d 100644
--- a/repos/ports/src/virtualbox5/pgm.cc
+++ b/repos/ports/src/virtualbox5/pgm.cc
@@ -60,7 +60,8 @@ int PGMR3MapPT(PVM, RTGCPTR GCPtr, uint32_t cb, uint32_t fFlags,
int PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
{
- Genode::log(__func__, ": not implemented ", __builtin_return_address(0));
+ if (verbose)
+ Genode::log(__func__, ": not implemented ", __builtin_return_address(0));
*pcb = 0;
diff --git a/repos/ports/src/virtualbox5/spec/nova/sup.cc b/repos/ports/src/virtualbox5/spec/nova/sup.cc
index 90e2cd402..bcd8e0a62 100644
--- a/repos/ports/src/virtualbox5/spec/nova/sup.cc
+++ b/repos/ports/src/virtualbox5/spec/nova/sup.cc
@@ -47,7 +47,7 @@
#include "mm.h"
-extern "C" bool PGMUnmapMemoryGenode(void *, ::size_t);
+extern "C" bool PGMUnmapMemoryGenode(void *, RTGCPHYS, ::size_t);
extern "C" void PGMFlushVMMemory();
@@ -650,7 +650,7 @@ int SUPR3CallVMMR0Ex(PVMR0 pVMR0, VMCPUID idCpu, unsigned uOperation,
if (last_chunk != chunkid) {
/* revoke mapping from guest VM */
 void * vmm_local = reinterpret_cast<void *>(vm_memory().local_addr(chunkid << GMM_CHUNK_SHIFT));
- PGMUnmapMemoryGenode(vmm_local, GMM_CHUNK_SIZE);
+ PGMUnmapMemoryGenode(vmm_local, 0, GMM_CHUNK_SIZE);
last_chunk = chunkid;
}
@@ -732,7 +732,7 @@ void genode_update_tsc(void (*update_func)(void), Genode::uint64_t update_us)
}
-bool PGMUnmapMemoryGenode(void * vmm_local, ::size_t size)
+bool PGMUnmapMemoryGenode(void * vmm_local, RTGCPHYS, ::size_t size)
{
Assert(vmm_local);
@@ -761,7 +761,7 @@ bool PGMUnmapMemoryGenode(void * vmm_local, ::size_t size)
extern "C" void PGMFlushVMMemory()
{
- PGMUnmapMemoryGenode((void *)vm_memory().local_addr(0), MAX_VM_MEMORY);
+ PGMUnmapMemoryGenode((void *)vm_memory().local_addr(0), 0, MAX_VM_MEMORY);
}
@@ -782,7 +782,7 @@ bool create_emt_vcpu(pthread_t * pthread, ::size_t stack,
void *(*start_routine)(void *), void *arg,
Genode::Cpu_session * cpu_session,
Genode::Affinity::Location location,
- unsigned int cpu_id, const char * name)
+ unsigned int cpu_id, const char * name, long)
{
Genode::Xml_node const features = platform_rom().sub_node("features");
bool const svm = features.attribute_value("svm", false);
diff --git a/repos/ports/src/virtualbox5/sup.cc b/repos/ports/src/virtualbox5/spec/nova/sup_old.cc
similarity index 99%
rename from repos/ports/src/virtualbox5/sup.cc
rename to repos/ports/src/virtualbox5/spec/nova/sup_old.cc
index 02e08503d..64d734532 100644
--- a/repos/ports/src/virtualbox5/sup.cc
+++ b/repos/ports/src/virtualbox5/spec/nova/sup_old.cc
@@ -386,6 +386,6 @@ HRESULT genode_check_memory_config(ComObjPtr,
return S_OK;
Genode::error("Available memory too low to start the VM - available: ",
- memory_vmm, "MB < ", memory_available, "MB requested");
+ memory_available, " MB < ", memory_vmm, " MB requested");
return E_FAIL;
}
diff --git a/repos/ports/src/virtualbox5/sup.h b/repos/ports/src/virtualbox5/sup.h
index 78b8de7a4..c07a14571 100644
--- a/repos/ports/src/virtualbox5/sup.h
+++ b/repos/ports/src/virtualbox5/sup.h
@@ -37,7 +37,7 @@ bool create_emt_vcpu(pthread_t * pthread, size_t stack,
Genode::Cpu_session * cpu_session,
Genode::Affinity::Location location,
unsigned int cpu_id,
- const char * name);
+ const char * name, long prio);
uint64_t genode_cpu_hz();
diff --git a/repos/ports/src/virtualbox5/svm.h b/repos/ports/src/virtualbox5/svm.h
new file mode 100644
index 000000000..e7b1e0e30
--- /dev/null
+++ b/repos/ports/src/virtualbox5/svm.h
@@ -0,0 +1,101 @@
+/*
+ * \brief Genode specific VirtualBox SUPLib supplements
+ * \author Norman Feske
+ * \author Alexander Boettcher
+ */
+
+/*
+ * Copyright (C) 2013-2019 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+#ifndef _VIRTUALBOX__SVM_H_
+#define _VIRTUALBOX__SVM_H_
+
+/* based on HWSVMR0.h - adjusted to Genode */
+
+#define GENODE_SVM_ASSERT_SELREG(REG) \
+ AssertMsg(!pCtx->REG.Attr.n.u1Present || \
+ (pCtx->REG.Attr.n.u1Granularity \
+ ? (pCtx->REG.u32Limit & 0xfffU) == 0xfffU \
+ : pCtx->REG.u32Limit <= 0xfffffU), \
+ ("%u %u %#x %#x %#llx\n", pCtx->REG.Attr.n.u1Present, \
+ pCtx->REG.Attr.n.u1Granularity, pCtx->REG.u32Limit, \
+ pCtx->REG.Attr.u, pCtx->REG.u64Base))
+
+#define GENODE_READ_SELREG(REG) \
+ pCtx->REG.Sel = state->REG.value().sel; \
+ pCtx->REG.ValidSel = state->REG.value().sel; \
+ pCtx->REG.fFlags = CPUMSELREG_FLAGS_VALID; \
+ pCtx->REG.u32Limit = state->REG.value().limit; \
+ pCtx->REG.u64Base = state->REG.value().base; \
+ pCtx->REG.Attr.u = sel_ar_conv_from_genode(state->REG.value().ar)
+
+static inline bool svm_save_state(Genode::Vm_state * state, VM * pVM, PVMCPU pVCpu)
+{
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ GENODE_READ_SELREG(cs);
+ GENODE_READ_SELREG(ds);
+ GENODE_READ_SELREG(es);
+ GENODE_READ_SELREG(fs);
+ GENODE_READ_SELREG(gs);
+ GENODE_READ_SELREG(ss);
+
+ GENODE_SVM_ASSERT_SELREG(cs);
+ GENODE_SVM_ASSERT_SELREG(ds);
+ GENODE_SVM_ASSERT_SELREG(es);
+ GENODE_SVM_ASSERT_SELREG(fs);
+ GENODE_SVM_ASSERT_SELREG(gs);
+ GENODE_SVM_ASSERT_SELREG(ss);
+
+ GENODE_READ_SELREG(ldtr);
+ GENODE_READ_SELREG(tr);
+
+ return true;
+}
+
+#undef GENODE_SVM_ASSERT_SELREG
+#undef GENODE_READ_SELREG
+
+
+
+
+#define GENODE_WRITE_SELREG(REG) \
+ Assert(pCtx->REG.fFlags & CPUMSELREG_FLAGS_VALID); \
+ Assert(pCtx->REG.ValidSel == pCtx->REG.Sel); \
+ state->REG.value(Segment{pCtx->REG.Sel, sel_ar_conv_to_genode(pCtx->REG.Attr.u), \
+ pCtx->REG.u32Limit, pCtx->REG.u64Base});
+
+static inline bool svm_load_state(Genode::Vm_state * state, VM * pVM, PVMCPU pVCpu)
+{
+ typedef Genode::Vm_state::Segment Segment;
+
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ state->efer.value(pCtx->msrEFER | MSR_K6_EFER_SVME);
+ /* unimplemented */
+ if (CPUMIsGuestInLongModeEx(pCtx))
+ return false;
+ state->efer.value(state->efer.value() & ~MSR_K6_EFER_LME);
+
+ GENODE_WRITE_SELREG(es);
+ GENODE_WRITE_SELREG(ds);
+
+ GENODE_WRITE_SELREG(fs);
+ GENODE_WRITE_SELREG(gs);
+
+ GENODE_WRITE_SELREG(cs);
+ GENODE_WRITE_SELREG(ss);
+
+ GENODE_WRITE_SELREG(ldtr);
+ GENODE_WRITE_SELREG(tr);
+
+ return true;
+}
+
+#undef GENODE_WRITE_SELREG
+
+#endif /* _VIRTUALBOX__SVM_H_ */
diff --git a/repos/ports/src/virtualbox5/target.inc b/repos/ports/src/virtualbox5/target.inc
index 73adb0a18..0049bea47 100644
--- a/repos/ports/src/virtualbox5/target.inc
+++ b/repos/ports/src/virtualbox5/target.inc
@@ -8,7 +8,7 @@ CC_WARN += -Wall
SRC_CC = frontend/main.cc frontend/console.cc \
frontend/VirtualBoxErrorInfoImpl.cpp \
devices.cc drivers.cc dummies.cc libc.cc \
- logger.cc mm.cc pdm.cc pgm.cc rt.cc sup.cc \
+ logger.cc mm.cc pdm.cc pgm.cc rt.cc \
hm.cc thread.cc dynlib.cc unimpl.cc
LIBS += base
diff --git a/repos/ports/src/virtualbox5/target.mk b/repos/ports/src/virtualbox5/target.mk
index a7f7d55d5..e191b5d88 100644
--- a/repos/ports/src/virtualbox5/target.mk
+++ b/repos/ports/src/virtualbox5/target.mk
@@ -1,7 +1,10 @@
-TARGET = virtualbox5-rem
-
-LIBS += virtualbox5-hwaccl-off
+TARGET = virtualbox5
include $(REP_DIR)/src/virtualbox5/target.inc
+LIBS += virtualbox5
+
+vpath frontend/% $(REP_DIR)/src/virtualbox5/
+vpath %.cc $(REP_DIR)/src/virtualbox5/
+
CC_CXX_WARN_STRICT =
diff --git a/repos/ports/src/virtualbox5/thread.cc b/repos/ports/src/virtualbox5/thread.cc
index 5c9952fef..176e23faa 100644
--- a/repos/ports/src/virtualbox5/thread.cc
+++ b/repos/ports/src/virtualbox5/thread.cc
@@ -31,7 +31,18 @@
/* vbox */
#include
-static Genode::Cpu_connection * cpu_connection(RTTHREADTYPE type) {
+static long prio_class(RTTHREADTYPE const type)
+{
+ unsigned const VIRTUAL_GENODE_VBOX_LEVELS = 16;
+ static_assert (RTTHREADTYPE_END < VIRTUAL_GENODE_VBOX_LEVELS,
+ "prio levels exceeds VIRTUAL_GENODE_VBOX_LEVELS");
+
+ return (VIRTUAL_GENODE_VBOX_LEVELS - type) *
+ Genode::Cpu_session::PRIORITY_LIMIT / VIRTUAL_GENODE_VBOX_LEVELS;
+}
+
+static Genode::Cpu_connection * cpu_connection(RTTHREADTYPE type)
+{
using namespace Genode;
static Cpu_connection * con[RTTHREADTYPE_END - 1];
@@ -44,18 +55,12 @@ static Genode::Cpu_connection * cpu_connection(RTTHREADTYPE type) {
if (con[type - 1])
return con[type - 1];
- unsigned const VIRTUAL_GENODE_VBOX_LEVELS = 16;
- static_assert (RTTHREADTYPE_END < VIRTUAL_GENODE_VBOX_LEVELS,
- "prio levels exceeds VIRTUAL_GENODE_VBOX_LEVELS");
-
- long const prio = (VIRTUAL_GENODE_VBOX_LEVELS - type) *
- Cpu_session::PRIORITY_LIMIT / VIRTUAL_GENODE_VBOX_LEVELS;
-
char * data = new (vmm_heap()) char[16];
Genode::snprintf(data, 16, "vbox %u", type);
- con[type - 1] = new (vmm_heap()) Cpu_connection(genode_env(), data, prio);
+ con[type - 1] = new (vmm_heap()) Cpu_connection(genode_env(), data,
+ prio_class(type));
return con[type - 1];
}
@@ -90,7 +95,8 @@ static int create_thread(pthread_t *thread, const pthread_attr_t *attr,
Genode::Affinity::Location location(space.location_of_index(cpu_id));
if (create_emt_vcpu(thread, stack_size, start_routine, arg,
- cpu_session, location, cpu_id, rtthread->szName))
+ cpu_session, location, cpu_id, rtthread->szName,
+ prio_class(rtthread->enmType)))
return 0;
/*
* The virtualization layer had no need to setup the EMT
diff --git a/repos/ports/src/virtualbox5/vcpu.h b/repos/ports/src/virtualbox5/vcpu.h
new file mode 100644
index 000000000..23b2d3873
--- /dev/null
+++ b/repos/ports/src/virtualbox5/vcpu.h
@@ -0,0 +1,884 @@
+/*
+ * \brief Genode VirtualBox SUPLib supplements
+ * \author Alexander Boettcher
+ * \author Norman Feske
+ * \author Christian Helmuth
+ */
+
+/*
+ * Copyright (C) 2013-2017 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+#ifndef _VIRTUALBOX__VCPU_H_
+#define _VIRTUALBOX__VCPU_H_
+
+/* Genode includes */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+/* VirtualBox includes */
+#include "PGMInternal.h" /* enable access to pgm.s.* */
+
+#include "HMInternal.h" /* enable access to hm.s.* */
+#include "CPUMInternal.h" /* enable access to cpum.s.* */
+
+#include
+#include
+#include
+
+#include
+
+#include
+
+/* Genode's VirtualBox includes */
+#include "sup.h"
+
+/* Genode libc pthread binding */
+#include "thread.h"
+
+/* LibC includes */
+#include
+
+#include
+
+static bool debug_map_memory = false;
+
+/*
+ * VirtualBox stores segment attributes in Intel format using a 32-bit
+ * value. Genode represents the attributes in packed format using a 16-bit
+ * value.
+ */
+static inline Genode::uint16_t sel_ar_conv_to_genode(Genode::uint32_t v)
+{
+ return (v & 0xff) | ((v & 0x1f000) >> 4);
+}
+
+
+static inline Genode::uint32_t sel_ar_conv_from_genode(Genode::uint16_t v)
+{
+ return (v & 0xff) | (((uint32_t )v << 4) & 0x1f000);
+}
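+
+/*
+ * For example, the Intel access-rights value 0xc093 (present, DPL 0,
+ * read/write data segment, G and D/B bits set) converts to the packed
+ * value 0xc93 and back: bits 0..7 are kept, bits 12..16 are packed
+ * down by four bit positions.
+ */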
+
+namespace Vcpu_sync
+{
+ struct Session : Genode::Session
+ {
+ GENODE_RPC(Rpc_resume, void, resume);
+ GENODE_RPC(Rpc_request_pause, void, request_pause);
+ GENODE_RPC_INTERFACE(Rpc_resume, Rpc_request_pause);
+ };
+
+ struct Client : Genode::Rpc_client<Session>
+ {
+ Client(Genode::Capability<Session> cap) : Rpc_client(cap) { }
+
+ void resume() { call<Rpc_resume>(); }
+ void request_pause() { call<Rpc_request_pause>(); }
+ };
+};
+
+class Vcpu_handler : public Genode::List<Vcpu_handler>::Element,
+ public Genode::Rpc_object<Vcpu_sync::Session, Vcpu_handler>
+{
+ protected:
+
+ Genode::Entrypoint _ep;
+ Genode::Lock _lock;
+ Genode::Vm_state *_state { nullptr };
+
+ /* halt / wakeup handling with timeout support */
+ Genode::Lock _r0_block_guard;
+ Genode::Semaphore _r0_block;
+ Genode::uint64_t _r0_wakeup_abs { 0 };
+
+ /* information used for NPT/EPT handling */
+ Genode::addr_t npt_ept_exit_addr { 0 };
+ RTGCUINT npt_ept_errorcode { 0 };
+ bool npt_ept_unmap { false };
+
+ /* state machine between EMT and EP thread of a vCPU */
+ enum State { RUNNING, PAUSED, IRQ_WIN, NPT_EPT } _vm_state { PAUSED };
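+ /*
+ * RUNNING: guest code executes, PAUSED: the last exit is handled by
+ * the EMT thread, IRQ_WIN: an interrupt-window exit is pending,
+ * NPT_EPT: a nested-page fault waits for memory to be attached.
+ */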
+
+ Vcpu_sync::Client _ep_emt;
+
+ private:
+
+ X86FXSTATE _guest_fpu_state __attribute__((aligned(0x10)));
+
+ bool _irq_win = false;
+
+ unsigned const _cpu_id;
+ PVM _vm { nullptr };
+ PVMCPU _vcpu { nullptr };
+
+ unsigned int _last_inj_info = 0;
+ unsigned int _last_inj_error = 0;
+
+ void fpu_save(char * data) {
+ Assert(!(reinterpret_cast<Genode::addr_t>(data) & 0xF));
+ asm volatile ("fxsave %0" : "=m" (*data));
+ }
+
+ void fpu_load(char * data) {
+ Assert(!(reinterpret_cast<Genode::addr_t>(data) & 0xF));
+ asm volatile ("fxrstor %0" : : "m" (*data));
+ }
+
+ enum {
+ REQ_IRQWIN_EXIT = 0x1000U,
+ IRQ_INJ_VALID_MASK = 0x80000000UL,
+ IRQ_INJ_NONE = 0U,
+
+ /*
+ * Intel® 64 and IA-32 Architectures Software Developer’s Manual
+ * Volume 3C, Chapter 24.4.2.
+ * May 2012
+ */
+ BLOCKING_BY_STI = 1U << 0,
+ BLOCKING_BY_MOV_SS = 1U << 1,
+ ACTIVITY_STATE_ACTIVE = 0U,
+ INTERRUPT_STATE_NONE = 0U,
+ };
+
+ /*
+ * 'longjmp()' restores some FPU registers saved by 'setjmp()',
+ * so we need to save the guest FPU state before calling 'longjmp()'
+ */
+ __attribute__((noreturn)) void _fpu_save_and_longjmp()
+ {
+ fpu_save(reinterpret_cast<char *>(&_guest_fpu_state));
+ }
+
+ protected:
+
+ int map_memory(Genode::Vm_connection &vm_session,
+ RTGCPHYS GCPhys, RTGCUINT vbox_fault_reason);
+
+ Genode::addr_t _vm_exits = 0;
+ Genode::addr_t _recall_skip = 0;
+ Genode::addr_t _recall_req = 0;
+ Genode::addr_t _recall_inv = 0;
+ Genode::addr_t _recall_drop = 0;
+ Genode::addr_t _irq_request = 0;
+ Genode::addr_t _irq_inject = 0;
+ Genode::addr_t _irq_drop = 0;
+
+ struct {
+ unsigned intr_state;
+ unsigned ctrl[2];
+ } next_utcb;
+
+ unsigned _ept_fault_addr_type;
+
+ Genode::uint64_t * pdpte_map(VM *pVM, RTGCPHYS cr3);
+
+ void switch_to_hw()
+ {
+ again:
+
+ _ep_emt.resume();
+
+ /* wait for next exit */
+ _lock.lock();
+
+ if (_vm_state == IRQ_WIN) {
+ *_state = Genode::Vm_state {}; /* reset */
+ _irq_window_pthread();
+ goto again;
+ } else
+ if (_vm_state == NPT_EPT) {
+ if (npt_ept_unmap) {
+ Genode::error("NPT/EPT unmap not supported - stop");
+ while (true) {
+ _lock.lock();
+ }
+ }
+
+ Genode::addr_t const gp_map_addr = npt_ept_exit_addr & ~((1UL << 12) - 1);
+ int res = attach_memory_to_vm(gp_map_addr, npt_ept_errorcode);
+ if (res == VINF_SUCCESS) {
+ *_state = Genode::Vm_state {}; /* reset */
+ goto again;
+ }
+ }
+
+ if (!(_vm_state == PAUSED || _vm_state == NPT_EPT))
+ Genode::error("which state we are ? ", (int)_vm_state, " ", Genode::Thread::myself()->name());
+
+ Assert(_vm_state == PAUSED || _vm_state == NPT_EPT);
+ }
+
+ void _default_handler()
+ {
+ if (_vm_state != RUNNING)
+ Genode::error(__func__, " _vm_state=", (int)_vm_state, " exit_reason=", Genode::Hex(_state->exit_reason));
+ Assert(_vm_state == RUNNING);
+
+ Assert(_state->actv_state.value() == ACTIVITY_STATE_ACTIVE);
+ Assert(!(_state->inj_info.value() & IRQ_INJ_VALID_MASK));
+
+ _vm_exits ++;
+
+ _vm_state = PAUSED;
+
+ _lock.unlock();
+ }
+
+ void _recall_handler()
+ {
+ if (_vm_state != RUNNING)
+ Genode::error(__func__, " _vm_state=", (int)_vm_state, " exit_reason=", Genode::Hex(_state->exit_reason));
+ Assert(_vm_state == RUNNING);
+
+ _vm_exits ++;
+ _recall_inv ++;
+
+ Assert(_state->actv_state.value() == ACTIVITY_STATE_ACTIVE);
+
+ if (_state->inj_info.value() & IRQ_INJ_VALID_MASK) {
+
+ Assert(_state->flags.value() & X86_EFL_IF);
+
+ if (_state->intr_state.value() != INTERRUPT_STATE_NONE)
+ Genode::log("intr state ", Genode::Hex(_state->intr_state.value()),
+ " ", Genode::Hex(_state->intr_state.value() & 0xf));
+
+ Assert(_state->intr_state.value() == INTERRUPT_STATE_NONE);
+
+ if (!continue_hw_accelerated())
+ _recall_drop ++;
+
+ /* got recall during irq injection and the guest is ready for
+ * delivery of IRQ - just continue */
+ run_vm();
+ return;
+ }
+
+ /* are we forced to go back to emulation mode ? */
+ if (!continue_hw_accelerated()) {
+ /* go back to emulation mode */
+ _default_handler();
+ return;
+ }
+
+ /* check whether we have to request irq injection window */
+ if (check_to_request_irq_window(_vcpu)) {
+ *_state = Genode::Vm_state {}; /* reset */
+ _state->inj_info.value(_state->inj_info.value());
+ _irq_win = true;
+ run_vm();
+ return;
+ }
+
+ _default_handler();
+ return;
+ }
+
+ inline bool vbox_to_state(VM *pVM, PVMCPU pVCpu)
+ {
+ typedef Genode::Vm_state::Range Range;
+
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ _state->ip.value(pCtx->rip);
+ _state->sp.value(pCtx->rsp);
+
+ _state->ax.value(pCtx->rax);
+ _state->bx.value(pCtx->rbx);
+ _state->cx.value(pCtx->rcx);
+ _state->dx.value(pCtx->rdx);
+
+ _state->bp.value(pCtx->rbp);
+ _state->si.value(pCtx->rsi);
+ _state->di.value(pCtx->rdi);
+
+ _state->r8.value(pCtx->r8);
+ _state->r9.value(pCtx->r9);
+ _state->r10.value(pCtx->r10);
+ _state->r11.value(pCtx->r11);
+ _state->r12.value(pCtx->r12);
+ _state->r13.value(pCtx->r13);
+ _state->r14.value(pCtx->r14);
+ _state->r15.value(pCtx->r15);
+
+ _state->flags.value(pCtx->rflags.u);
+
+ _state->sysenter_cs.value(pCtx->SysEnter.cs);
+ _state->sysenter_sp.value(pCtx->SysEnter.esp);
+ _state->sysenter_ip.value(pCtx->SysEnter.eip);
+
+ _state->dr7.value(pCtx->dr[7]);
+
+ _state->cr0.value(pCtx->cr0);
+ _state->cr2.value(pCtx->cr2);
+ _state->cr3.value(pCtx->cr3);
+ _state->cr4.value(pCtx->cr4);
+
+ _state->idtr.value(Range{pCtx->idtr.pIdt, pCtx->idtr.cbIdt});
+ _state->gdtr.value(Range{pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt});
+
+ _state->efer.value(CPUMGetGuestEFER(pVCpu));
+
+ /*
+ * Update the PDPTE registers if necessary
+ *
+ * Intel manual sections 4.4.1 of Vol. 3A and 26.3.2.4 of Vol. 3C
+ * indicate the conditions when this is the case. The following
+ * code currently does not check if the recompiler modified any
+ * CR registers, which means the update can happen more often
+ * than really necessary.
+ */
+ if (pVM->hm.s.vmx.fSupported &&
+ CPUMIsGuestPagingEnabledEx(pCtx) &&
+ CPUMIsGuestInPAEModeEx(pCtx)) {
+
+ Genode::uint64_t *pdpte = pdpte_map(pVM, pCtx->cr3);
+
+ _state->pdpte_0.value(pdpte[0]);
+ _state->pdpte_1.value(pdpte[1]);
+ _state->pdpte_2.value(pdpte[2]);
+ _state->pdpte_3.value(pdpte[3]);
+ }
+
+ _state->star.value(pCtx->msrSTAR);
+ _state->lstar.value(pCtx->msrLSTAR);
+ _state->fmask.value(pCtx->msrSFMASK);
+ _state->kernel_gs_base.value(pCtx->msrKERNELGSBASE);
+
+ /* from HMVMXR0.cpp */
+ bool interrupt_pending = false;
+ uint8_t tpr = 0;
+ uint8_t pending_interrupt = 0;
+ PDMApicGetTPR(pVCpu, &tpr, &interrupt_pending, &pending_interrupt);
+
+ _state->tpr.value(tpr);
+ _state->tpr_threshold.value(0);
+
+ if (interrupt_pending) {
+ const uint8_t pending_priority = (pending_interrupt >> 4) & 0xf;
+ const uint8_t tpr_priority = (tpr >> 4) & 0xf;
+ if (pending_priority <= tpr_priority)
+ _state->tpr_threshold.value(pending_priority);
+ else
+ _state->tpr_threshold.value(tpr_priority);
+ }
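+
+ /*
+ * The TPR threshold is thus the minimum of the pending interrupt's
+ * priority and the current TPR priority; presumably this makes the
+ * guest trap once it lowers the TPR far enough for the pending
+ * interrupt to become deliverable (cf. HMVMXR0.cpp).
+ */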
+
+ return true;
+ }
+
+
+ inline bool state_to_vbox(VM *pVM, PVMCPU pVCpu)
+ {
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ pCtx->rip = _state->ip.value();
+ pCtx->rsp = _state->sp.value();
+
+ pCtx->rax = _state->ax.value();
+ pCtx->rbx = _state->bx.value();
+ pCtx->rcx = _state->cx.value();
+ pCtx->rdx = _state->dx.value();
+
+ pCtx->rbp = _state->bp.value();
+ pCtx->rsi = _state->si.value();
+ pCtx->rdi = _state->di.value();
+ pCtx->rflags.u = _state->flags.value();
+
+ pCtx->r8 = _state->r8.value();
+ pCtx->r9 = _state->r9.value();
+ pCtx->r10 = _state->r10.value();
+ pCtx->r11 = _state->r11.value();
+ pCtx->r12 = _state->r12.value();
+ pCtx->r13 = _state->r13.value();
+ pCtx->r14 = _state->r14.value();
+ pCtx->r15 = _state->r15.value();
+
+ pCtx->dr[7] = _state->dr7.value();
+
+ if (pCtx->SysEnter.cs != _state->sysenter_cs.value())
+ CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_CS, _state->sysenter_cs.value());
+
+ if (pCtx->SysEnter.esp != _state->sysenter_sp.value())
+ CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_ESP, _state->sysenter_sp.value());
+
+ if (pCtx->SysEnter.eip != _state->sysenter_ip.value())
+ CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_EIP, _state->sysenter_ip.value());
+
+ if (pCtx->idtr.cbIdt != _state->idtr.value().limit ||
+ pCtx->idtr.pIdt != _state->idtr.value().base)
+ CPUMSetGuestIDTR(pVCpu, _state->idtr.value().base, _state->idtr.value().limit);
+
+ if (pCtx->gdtr.cbGdt != _state->gdtr.value().limit ||
+ pCtx->gdtr.pGdt != _state->gdtr.value().base)
+ CPUMSetGuestGDTR(pVCpu, _state->gdtr.value().base, _state->gdtr.value().limit);
+
+ CPUMSetGuestEFER(pVCpu, _state->efer.value());
+
+ if (pCtx->cr0 != _state->cr0.value())
+ CPUMSetGuestCR0(pVCpu, _state->cr0.value());
+
+ if (pCtx->cr2 != _state->cr2.value())
+ CPUMSetGuestCR2(pVCpu, _state->cr2.value());
+
+ if (pCtx->cr3 != _state->cr3.value()) {
+ CPUMSetGuestCR3(pVCpu, _state->cr3.value());
+ VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
+ }
+
+ if (pCtx->cr4 != _state->cr4.value())
+ CPUMSetGuestCR4(pVCpu, _state->cr4.value());
+
+ if (pCtx->msrSTAR != _state->star.value())
+ CPUMSetGuestMsr(pVCpu, MSR_K6_STAR, _state->star.value());
+
+ if (pCtx->msrLSTAR != _state->lstar.value())
+ CPUMSetGuestMsr(pVCpu, MSR_K8_LSTAR, _state->lstar.value());
+
+ if (pCtx->msrSFMASK != _state->fmask.value())
+ CPUMSetGuestMsr(pVCpu, MSR_K8_SF_MASK, _state->fmask.value());
+
+ if (pCtx->msrKERNELGSBASE != _state->kernel_gs_base.value())
+ CPUMSetGuestMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, _state->kernel_gs_base.value());
+
+ const uint32_t tpr = _state->tpr.value();
+
+ /* reset message transfer descriptor for next invocation */
+ Assert (!(_state->inj_info.value() & IRQ_INJ_VALID_MASK));
+ next_utcb.intr_state = _state->intr_state.value();
+ next_utcb.ctrl[0] = _state->ctrl_primary.value();
+ next_utcb.ctrl[1] = _state->ctrl_secondary.value();
+
+ if (next_utcb.intr_state & 3) {
+ next_utcb.intr_state &= ~3U;
+ }
+
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
+
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
+ pVCpu->cpum.s.fUseFlags |= (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_SINCE_REM);
+
+ if (_state->intr_state.value() != 0) {
+ Assert(_state->intr_state.value() == BLOCKING_BY_STI ||
+ _state->intr_state.value() == BLOCKING_BY_MOV_SS);
+ EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
+ } else
+ VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+ PDMApicSetTPR(pVCpu, tpr);
+
+ return true;
+ }
+
+
+ inline bool check_to_request_irq_window(PVMCPU pVCpu)
+ {
+ if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+ return false;
+
+ if (!TRPMHasTrap(pVCpu) &&
+ !VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
+ VMCPU_FF_INTERRUPT_PIC)))
+ return false;
+
+ _irq_request++;
+
+ unsigned const vector = 0;
+ _state->inj_info.value(REQ_IRQWIN_EXIT | vector);
+
+ return true;
+ }
+
+
+ void _irq_window()
+ {
+ if (_vm_state != RUNNING)
+ Genode::error(__func__, " _vm_state=", (int)_vm_state, " exit_reason=", Genode::Hex(_state->exit_reason));
+ Assert(_vm_state == RUNNING);
+
+ _vm_exits ++;
+
+ _vm_state = IRQ_WIN;
+ _lock.unlock();
+ }
+
+ void _npt_ept()
+ {
+ if (_vm_state != RUNNING)
+ Genode::error(__func__, " _vm_state=", (int)_vm_state, " exit_reason=", Genode::Hex(_state->exit_reason));
+ Assert(_vm_state == RUNNING);
+
+ _vm_exits ++;
+
+ _vm_state = NPT_EPT;
+ _lock.unlock();
+ }
+
+ void _irq_window_pthread()
+ {
+ PVMCPU pVCpu = _vcpu;
+
+ Assert(_state->intr_state.value() == INTERRUPT_STATE_NONE);
+ Assert(_state->flags.value() & X86_EFL_IF);
+ Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+ Assert(!(_state->inj_info.value() & IRQ_INJ_VALID_MASK));
+
+ Assert(_irq_win);
+
+ _irq_win = false;
+
+ /* request current tpr state from guest, it may block IRQs */
+ PDMApicSetTPR(pVCpu, _state->tpr_threshold.value());
+
+ if (!TRPMHasTrap(pVCpu)) {
+
+ bool res = VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+ if (res)
+ Genode::log("NMI was set");
+
+ if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
+ VMCPU_FF_INTERRUPT_PIC))) {
+
+ uint8_t irq;
+ int rc = PDMGetInterrupt(pVCpu, &irq);
+ Assert(RT_SUCCESS(rc));
+
+ rc = TRPMAssertTrap(pVCpu, irq, TRPM_HARDWARE_INT);
+ Assert(RT_SUCCESS(rc));
+ }
+
+ if (!TRPMHasTrap(pVCpu)) {
+ _irq_drop++;
+ /* happens if PDMApicSetTPR (see above) masks the IRQ */
+ _state->inj_info.value(IRQ_INJ_NONE);
+ Genode::error("virq window pthread aaaaaaa while loop");
+ return;
+ }
+ }
+ _irq_inject++;
+
+ /*
+ * If we have no IRQ to inject, something went wrong when requesting
+ * the IRQ window - most likely it was not reset properly.
+ */
+ Assert(TRPMHasTrap(pVCpu));
+
+ /* interrupt can be dispatched */
+ uint8_t u8Vector;
+ TRPMEVENT enmType;
+ SVMEVENT Event;
+ RTGCUINT u32ErrorCode;
+ RTGCUINTPTR GCPtrFaultAddress;
+ uint8_t cbInstr;
+
+ Event.u = 0;
+
+ /* If a new event is pending, then dispatch it now. */
+ int rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, 0, 0);
+ AssertRC(rc);
+ Assert(enmType == TRPM_HARDWARE_INT);
+ Assert(u8Vector != X86_XCPT_NMI);
+
+ /* Clear the pending trap. */
+ rc = TRPMResetTrap(pVCpu);
+ AssertRC(rc);
+
+ Event.n.u8Vector = u8Vector;
+ Event.n.u1Valid = 1;
+ Event.n.u32ErrorCode = u32ErrorCode;
+
+ Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
+
+ _state->inj_info.value(Event.u);
+ _state->inj_error.value(Event.n.u32ErrorCode);
+
+ _last_inj_info = _state->inj_info.value();
+ _last_inj_error = _state->inj_error.value();
+
+/*
+ Genode::log("type:info:vector ", Genode::Hex(Event.n.u3Type),
+ Genode::Hex(utcb->inj_info), Genode::Hex(u8Vector),
+ " intr:actv - ", Genode::Hex(utcb->intr_state),
+ Genode::Hex(utcb->actv_state), " mtd ",
+ Genode::Hex(utcb->mtd));
+*/
+ }
+
+
+ inline bool continue_hw_accelerated(bool verbose = false)
+ {
+ uint32_t check_vm = VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST
+ | VM_FF_PGM_POOL_FLUSH_PENDING
+ | VM_FF_PDM_DMA;
+ uint32_t check_vcpu = VMCPU_FF_HM_TO_R3_MASK
+ | VMCPU_FF_PGM_SYNC_CR3
+ | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
+ | VMCPU_FF_REQUEST;
+
+ if (!VM_FF_IS_PENDING(_vm, check_vm) &&
+ !VMCPU_FF_IS_PENDING(_vcpu, check_vcpu))
+ return true;
+
+ Assert(!(VM_FF_IS_PENDING(_vm, VM_FF_PGM_NO_MEMORY)));
+
+#define VERBOSE_VM(flag) \
+ do { \
+ if (VM_FF_IS_PENDING(_vm, flag)) \
+ Genode::log("flag ", flag, " pending"); \
+ } while (0)
+
+#define VERBOSE_VMCPU(flag) \
+ do { \
+ if (VMCPU_FF_IS_PENDING(_vcpu, flag)) \
+ Genode::log("flag ", flag, " pending"); \
+ } while (0)
+
+ if (verbose) {
+ /*
+ * VM_FF_HM_TO_R3_MASK
+ */
+ VERBOSE_VM(VM_FF_TM_VIRTUAL_SYNC);
+ VERBOSE_VM(VM_FF_PGM_NEED_HANDY_PAGES);
+ /* handled by the assertion above */
+ /* VERBOSE_VM(VM_FF_PGM_NO_MEMORY); */
+ VERBOSE_VM(VM_FF_PDM_QUEUES);
+ VERBOSE_VM(VM_FF_EMT_RENDEZVOUS);
+
+ VERBOSE_VM(VM_FF_REQUEST);
+ VERBOSE_VM(VM_FF_PGM_POOL_FLUSH_PENDING);
+ VERBOSE_VM(VM_FF_PDM_DMA);
+
+ /*
+ * VMCPU_FF_HM_TO_R3_MASK
+ */
+ VERBOSE_VMCPU(VMCPU_FF_TO_R3);
+ /* when this flag gets set, a recall request follows */
+ /* VERBOSE_VMCPU(VMCPU_FF_TIMER); */
+ VERBOSE_VMCPU(VMCPU_FF_PDM_CRITSECT);
+
+ VERBOSE_VMCPU(VMCPU_FF_PGM_SYNC_CR3);
+ VERBOSE_VMCPU(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
+ VERBOSE_VMCPU(VMCPU_FF_REQUEST);
+ }
+
+#undef VERBOSE_VMCPU
+#undef VERBOSE_VM
+
+ return false;
+ }
+
+ virtual bool hw_load_state(Genode::Vm_state *, VM *, PVMCPU) = 0;
+ virtual bool hw_save_state(Genode::Vm_state *, VM *, PVMCPU) = 0;
+ virtual int vm_exit_requires_instruction_emulation(PCPUMCTX) = 0;
+
+ virtual void run_vm() = 0;
+ virtual void pause_vm() = 0;
+ virtual int attach_memory_to_vm(RTGCPHYS const,
+ RTGCUINT vbox_fault_reason) = 0;
+
+ public:
+
+ enum Exit_condition
+ {
+ SVM_NPT = 0xfc,
+ SVM_INVALID = 0xfd,
+
+ VCPU_STARTUP = 0xfe,
+
+ RECALL = 0xff,
+ };
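+
+ /*
+ * Exit codes reported via the VM session in addition to the raw
+ * hardware exit reasons: VCPU_STARTUP marks the initial exit of a
+ * newly created vCPU, RECALL a pause request.
+ */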
+
+
+ Vcpu_handler(Genode::Env &env, size_t stack_size,
+ Genode::Affinity::Location location,
+ unsigned int cpu_id)
+ :
+ _ep(env, stack_size,
+ Genode::String<12>("EP-EMT-", cpu_id).string(), location),
+ _ep_emt(_ep.rpc_ep().manage(this)),
+ _cpu_id(cpu_id)
+ { }
+
+ void resume()
+ {
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(_vcpu);
+
+ /* write FPU state from pCtx to FPU registers */
+// fpu_load(reinterpret_cast(pCtx->pXStateR3));
+
+ Assert(_vm_state == IRQ_WIN || _vm_state == PAUSED || _vm_state == NPT_EPT);
+
+ _vm_state = RUNNING;
+ run_vm();
+ }
+
+ void request_pause()
+ {
+ _recall_req ++;
+
+ if (_irq_win) {
+ _recall_skip ++;
+ return;
+ }
+
+ if (_vm_state != RUNNING)
+ return;
+
+ pause_vm();
+ }
+
+ unsigned int cpu_id() { return _cpu_id; }
+
+
+ void recall(PVM vm)
+ {
+ if (!_vm || !_vcpu) {
+ _vm = vm;
+ _vcpu = &vm->aCpus[_cpu_id];
+ }
+
+ if (_vm != vm || _vcpu != &vm->aCpus[_cpu_id])
+ Genode::error("wrong CPU !?");
+
+ _ep_emt.request_pause();
+
+#if 0
+ if (_recall_req % 1000 == 0) {
+ using Genode::log;
+
+ while (other) {
+ log(other->_cpu_id, " exits=", other->_vm_exits,
+ " req:skip:drop,inv recall=", other->_recall_req, ":",
+ other->_recall_skip, ":", other->_recall_drop, ":",
+ other->_recall_inv, " req:inj:drop irq=",
+ other->_irq_request, ":", other->_irq_inject, ":",
+ other->_irq_drop);
+
+ other = other->next();
+ }
+ }
+#endif
+ }
+
+ void check_time()
+ {
+ {
+ Genode::Lock_guard<Genode::Lock> lock(_r0_block_guard);
+
+ const uint64_t u64NowGip = RTTimeNanoTS();
+ if (!_r0_wakeup_abs || _r0_wakeup_abs >= u64NowGip)
+ return;
+ }
+
+ wake_up();
+ }
+
+ void halt(Genode::uint64_t rttime_abs)
+ {
+ {
+ Genode::Lock_guard<Genode::Lock> lock(_r0_block_guard);
+ _r0_wakeup_abs = rttime_abs;
+ }
+
+ _r0_block.down();
+ }
+
+ void wake_up()
+ {
+ {
+ Genode::Lock_guard<Genode::Lock> lock(_r0_block_guard);
+ _r0_wakeup_abs = 0;
+ }
+
+ _r0_block.up();
+ }
+
+ int run_hw(PVMR0 pVMR0)
+ {
+ VM * pVM = reinterpret_cast<VM *>(pVMR0);
+ PVMCPU pVCpu = &pVM->aCpus[_cpu_id];
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ if (!_vm || !_vcpu) {
+ _vm = pVM;
+ _vcpu = &pVM->aCpus[_cpu_id];
+ }
+
+ if (_vm != pVM || _vcpu != &pVM->aCpus[_cpu_id])
+ Genode::error("wrong CPU !?");
+
+ /* take the utcb state prepared during the last exit */
+ _state->inj_info.value(IRQ_INJ_NONE);
+ _state->intr_state.value(next_utcb.intr_state);
+ _state->actv_state.value(ACTIVITY_STATE_ACTIVE);
+ _state->ctrl_primary.value(next_utcb.ctrl[0]);
+ _state->ctrl_secondary.value(next_utcb.ctrl[1]);
+
+ /* Transfer vCPU state from vbox to Genode format */
+ if (!vbox_to_state(pVM, pVCpu) ||
+ !hw_load_state(_state, pVM, pVCpu)) {
+
+ Genode::error("loading vCPU state failed");
+ return VERR_INTERNAL_ERROR;
+ }
+
+ /* check whether to request interrupt window for injection */
+ _irq_win = check_to_request_irq_window(pVCpu);
+
+ /*
+ * Flag vCPU to be "pokeable" by external events such as interrupts
+ * from virtual devices. Only if this flag is set, the
+ * 'vmR3HaltGlobal1NotifyCpuFF' function calls 'SUPR3CallVMMR0Ex'
+ * with VMMR0_DO_GVMM_SCHED_POKE as argument to indicate such
+ * events. This function, in turn, will recall the vCPU.
+ */
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+
+ /* switch to hardware accelerated mode */
+ switch_to_hw();
+
+ Assert(_state->actv_state.value() == ACTIVITY_STATE_ACTIVE);
+
+ /* write FPU state of vCPU (in current FPU registers) to pCtx */
+ Genode::memcpy(pCtx->pXStateR3, &_guest_fpu_state, sizeof(X86FXSTATE));
+
+ /* see hmR0VmxExitToRing3 - sync recompiler state */
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR |
+ CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR |
+ CPUM_CHANGED_IDTR | CPUM_CHANGED_TR |
+ CPUM_CHANGED_HIDDEN_SEL_REGS |
+ CPUM_CHANGED_GLOBAL_TLB_FLUSH);
+
+ VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+
+ /* Transfer vCPU state from Genode to vbox format */
+ if (!state_to_vbox(pVM, pVCpu) ||
+ !hw_save_state(_state, pVM, pVCpu)) {
+
+ Genode::error("saving vCPU state failed");
+ return VERR_INTERNAL_ERROR;
+ }
+
+#ifdef VBOX_WITH_REM
+ REMFlushTBs(pVM);
+#endif
+
+ /* track guest mode changes - see VMM/VMMAll/IEMAllCImpl.cpp.h */
+ PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
+
+ int rc = vm_exit_requires_instruction_emulation(pCtx);
+
+ /* evaluated in VMM/include/EMHandleRCTmpl.h */
+ return rc;
+ }
+};
+
+#endif /* _VIRTUALBOX__VCPU_H_ */
diff --git a/repos/ports/src/virtualbox5/vcpu_svm.h b/repos/ports/src/virtualbox5/vcpu_svm.h
new file mode 100644
index 000000000..3a9e1ff5e
--- /dev/null
+++ b/repos/ports/src/virtualbox5/vcpu_svm.h
@@ -0,0 +1,183 @@
+/*
+ * \brief Genode/Nova specific VirtualBox SUPLib supplements
+ * \author Alexander Boettcher
+ * \date 2013-11-18
+ */
+
+/*
+ * Copyright (C) 2013-2017 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+#ifndef _VIRTUALBOX__VCPU_SVM_H_
+#define _VIRTUALBOX__VCPU_SVM_H_
+
+/* base includes */
+#include
+#include
+
+#include
+
+/* Genode's VirtualBox includes */
+#include "vcpu.h"
+#include "svm.h"
+
+class Vcpu_handler_svm : public Vcpu_handler
+{
+ private:
+
+ Genode::Vm_handler<Vcpu_handler_svm> _handler;
+
+ Genode::Vm_connection &_vm_session;
+ Genode::Vm_session_client::Vcpu_id _vcpu;
+
+ Genode::Attached_dataspace _state_ds;
+
+ void _svm_default() { _default_handler(); }
+ void _svm_vintr() { _irq_window(); }
+
+ void _svm_ioio()
+ {
+ if (_state->qual_primary.value() & 0x4) {
+ unsigned ctrl0 = _state->ctrl_primary.value();
+
+ Genode::warning("invalid gueststate");
+
+ *_state = Genode::Vm_state {}; /* reset */
+
+ _state->ctrl_primary.value(ctrl0);
+ _state->ctrl_secondary.value(0);
+
+ _vm_session.run(_vcpu);
+ } else
+ _default_handler();
+ }
+
+ template
+ void _svm_npt()
+ {
+ bool const unmap = _state->qual_primary.value() & 1;
+ Genode::addr_t const exit_addr = _state->qual_secondary.value();
+ RTGCUINT const vbox_errorcode = _state->qual_primary.value();
+
+ npt_ept_exit_addr = exit_addr;
+ npt_ept_unmap = unmap;
+ npt_ept_errorcode = vbox_errorcode;
+
+ _npt_ept();
+ }
+
+ void _svm_startup()
+ {
+ /* enable VM exits for CPUID */
+ next_utcb.ctrl[0] = SVM_CTRL1_INTERCEPT_CPUID;
+ next_utcb.ctrl[1] = 0;
+ }
+
+ void _svm_recall() { Vcpu_handler::_recall_handler(); }
+
+ void _handle_vm_exception()
+ {
+ unsigned const exit = _state->exit_reason;
+// Genode::warning(__func__, " ", Genode::Hex(exit), " _irq_win=", _irq_win);
+ switch (exit) {
+ case RECALL: _svm_recall(); break;
+ case SVM_EXIT_IOIO: _svm_ioio(); break;
+ case SVM_EXIT_VINTR: _svm_vintr(); break;
+// case SVM_EXIT_RDTSC: _svm_default(); break;
+ case SVM_EXIT_MSR: _svm_default(); break;
+ case SVM_NPT: _svm_npt(); break;
+ case SVM_EXIT_HLT: _svm_default(); break;
+ case SVM_EXIT_CPUID: _svm_default(); break;
+ case VCPU_STARTUP:
+ _svm_startup();
+ _lock.unlock();
+ /* pause - no resume */
+ return;
+ default:
+ Genode::error(__func__, " unknown exit - stop - ",
+ Genode::Hex(exit));
+ _vm_state = PAUSED;
+ return;
+ }
+ }
+
+ void run_vm() { _vm_session.run(_vcpu); }
+ void pause_vm() { _vm_session.pause(_vcpu); }
+
+ int attach_memory_to_vm(RTGCPHYS const gp_attach_addr,
+ RTGCUINT vbox_errorcode)
+ {
+ return map_memory(_vm_session, gp_attach_addr, vbox_errorcode);
+ }
+
+ void _exit_config(Genode::Vm_state &state, unsigned exit)
+ {
+ switch (exit) {
+ case RECALL:
+ case SVM_EXIT_IOIO:
+ case SVM_EXIT_VINTR:
+ case SVM_EXIT_RDTSC:
+ case SVM_EXIT_MSR:
+ case SVM_NPT:
+ case SVM_EXIT_HLT:
+ case SVM_EXIT_CPUID:
+ case VCPU_STARTUP:
+ /* todo - touch all members */
+ Genode::memset(&state, ~0U, sizeof(state));
+ break;
+ default:
+ break;
+ }
+ }
+
+ public:
+
+ Vcpu_handler_svm(Genode::Env &env, size_t stack_size,
+ Genode::Affinity::Location location,
+ unsigned int cpu_id,
+ Genode::Vm_connection &vm_session,
+ Genode::Allocator &alloc)
+ :
+ Vcpu_handler(env, stack_size, location, cpu_id),
+ _handler(_ep, *this, &Vcpu_handler_svm::_handle_vm_exception,
+ &Vcpu_handler_svm::_exit_config),
+ _vm_session(vm_session),
+ /* construct vcpu */
+ _vcpu(_vm_session.with_upgrade([&]() {
+ return _vm_session.create_vcpu(alloc, env, _handler); })),
+ /* get state of vcpu */
+ _state_ds(env.rm(), _vm_session.cpu_state(_vcpu))
+ {
+ _state = _state_ds.local_addr<Genode::Vm_state>();
+
+ /* sync with initial startup exception */
+ _lock.lock();
+
+ _vm_session.run(_vcpu);
+
+ /* sync with initial startup exception */
+ _lock.lock();
+// _lock.unlock();
+ }
+
+ bool hw_save_state(Genode::Vm_state *state, VM * pVM, PVMCPU pVCpu) {
+ return svm_save_state(state, pVM, pVCpu);
+ }
+
+ bool hw_load_state(Genode::Vm_state *state, VM * pVM, PVMCPU pVCpu) {
+ return svm_load_state(state, pVM, pVCpu);
+ }
+
+ int vm_exit_requires_instruction_emulation(PCPUMCTX)
+ {
+ if (_state->exit_reason == RECALL)
+ return VINF_SUCCESS;
+
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+};
+
+#endif /* _VIRTUALBOX__VCPU_SVM_H_ */
diff --git a/repos/ports/src/virtualbox5/vcpu_vmx.h b/repos/ports/src/virtualbox5/vcpu_vmx.h
new file mode 100644
index 000000000..030f98750
--- /dev/null
+++ b/repos/ports/src/virtualbox5/vcpu_vmx.h
@@ -0,0 +1,285 @@
+/*
+ * \brief Genode/Nova specific VirtualBox SUPLib supplements
+ * \author Alexander Boettcher
+ * \author Norman Feske
+ * \author Christian Helmuth
+ */
+
+/*
+ * Copyright (C) 2013-2017 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+#ifndef _VIRTUALBOX__VCPU_VMX_H_
+#define _VIRTUALBOX__VCPU_VMX_H_
+
+/* base includes */
+#include
+#include
+
+#include
+
+/* libc includes */
+#include
+
+/* VirtualBox includes */
+#include
+
+/* Genode's VirtualBox includes */
+#include "vcpu.h"
+#include "vmx.h"
+
+
+class Vcpu_handler_vmx : public Vcpu_handler
+{
+ private:
+
+ Genode::Vm_handler<Vcpu_handler_vmx> _handler;
+
+ Genode::Vm_connection &_vm_session;
+ Genode::Vm_session_client::Vcpu_id _vcpu;
+
+ Genode::Attached_dataspace _state_ds;
+
+ template
+ void _vmx_ept()
+ {
+ Genode::addr_t const exit_qual = _state->qual_primary.value();
+ Genode::addr_t const exit_addr = _state->qual_secondary.value();
+ bool const unmap = exit_qual & 0x38;
+
+ RTGCUINT vbox_errorcode = 0;
+ if (exit_qual & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
+ vbox_errorcode |= X86_TRAP_PF_ID;
+ if (exit_qual & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
+ vbox_errorcode |= X86_TRAP_PF_RW;
+ if (exit_qual & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
+ vbox_errorcode |= X86_TRAP_PF_P;
+
+ npt_ept_exit_addr = exit_addr;
+ npt_ept_unmap = unmap;
+ npt_ept_errorcode = vbox_errorcode;
+
+ _npt_ept();
+ }
+
+ void _vmx_default() { _default_handler(); }
+
+ void _vmx_startup()
+ {
+ /* configure VM exits to get */
+ /* from src/VBox/VMM/VMMR0/HWVMXR0.cpp of virtualbox sources */
+ next_utcb.ctrl[0] = VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT |
+ VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT |
+ VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT |
+/*
+ VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT |
+ VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT |
+*/
+/* VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT |
+ VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT |*/
+ VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW |
+ VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT;
+/* VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT | */
+ /*
+ * Disable trapping RDTSC for now as it creates a huge load with
+ * VM guests that execute it frequently.
+ */
+ // VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
+
+ next_utcb.ctrl[1] = VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC |
+ VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT |
+ VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST |
+ VMX_VMCS_CTRL_PROC_EXEC2_VPID |
+ VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP |
+ VMX_VMCS_CTRL_PROC_EXEC2_EPT |
+ VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
+ }
+
+ void _vmx_triple()
+ {
+ Genode::error("triple fault - dead");
+ exit(-1);
+ }
+
+ void _vmx_irqwin() { _irq_window(); }
+
+ void _vmx_recall() { Vcpu_handler::_recall_handler(); }
+
+ __attribute__((noreturn)) void _vmx_invalid()
+ {
+ unsigned const dubious = _state->inj_info.value() |
+ _state->intr_state.value() |
+ _state->actv_state.value();
+ if (dubious)
+ Genode::warning(__func__, " - dubious -"
+ " inj_info=", Genode::Hex(_state->inj_info.value()),
+ " inj_error=", Genode::Hex(_state->inj_error.value()),
+ " intr_state=", Genode::Hex(_state->intr_state.value()),
+ " actv_state=", Genode::Hex(_state->actv_state.value()));
+
+ Genode::error("invalid guest state - dead");
+ exit(-1);
+ }
+
+ /*
+ * This VM exit is in part handled by the NOVA kernel (writing the CR
+ * register) and in part by VirtualBox (updating the PDPTE registers,
+ * which requires access to the guest physical memory).
+ * Intel manual sections 4.4.1 of Vol. 3A and 26.3.2.4 of Vol. 3C
+ * indicate the conditions when the PDPTE registers need to get
+ * updated.
+ */
+ void _vmx_mov_crx() { _default_handler(); return; }
+
+ void _handle_vm_exception()
+ {
+ unsigned const exit = _state->exit_reason;
+
+ switch (exit) {
+ case VMX_EXIT_TRIPLE_FAULT: _vmx_triple(); break;
+ case VMX_EXIT_INIT_SIGNAL: _vmx_default(); break;
+ case VMX_EXIT_INT_WINDOW: _vmx_irqwin(); break;
+ case VMX_EXIT_TASK_SWITCH: _vmx_default(); break;
+ case VMX_EXIT_CPUID: _vmx_default(); break;
+ case VMX_EXIT_HLT: _vmx_default(); break;
+ /* we don't support tsc offsetting for now - so let the rdtsc exit */
+ case VMX_EXIT_RDTSC: _vmx_default(); break;
+ case VMX_EXIT_RDTSCP: _vmx_default(); break;
+ case VMX_EXIT_VMCALL: _vmx_default(); break;
+ case VMX_EXIT_IO_INSTR: _vmx_default(); break;
+ case VMX_EXIT_RDMSR: _vmx_default(); break;
+ case VMX_EXIT_WRMSR: _vmx_default(); break;
+ case VMX_EXIT_ERR_INVALID_GUEST_STATE: _vmx_invalid(); break;
+ case VMX_EXIT_PAUSE: _vmx_default(); break;
+ case VMX_EXIT_WBINVD: _vmx_default(); break;
+ case VMX_EXIT_MOV_CRX: _vmx_mov_crx(); break;
+ case VMX_EXIT_MOV_DRX: _vmx_default(); break;
+ case VMX_EXIT_TPR_BELOW_THRESHOLD: _vmx_default(); break;
+ case VMX_EXIT_EPT_VIOLATION: _vmx_ept(); break;
+ case RECALL: _vmx_recall(); break;
+ case VCPU_STARTUP:
+ _vmx_startup();
+ _lock.unlock();
+ /* pause - no resume */
+ return;
+ default:
+ Genode::error(__func__, " unknown exit - stop - ",
+ Genode::Hex(exit));
+ _vm_state = PAUSED;
+ return;
+ }
+ }
+
+ void run_vm() { _vm_session.run(_vcpu); }
+ void pause_vm() { _vm_session.pause(_vcpu); }
+
+ int attach_memory_to_vm(RTGCPHYS const gp_attach_addr,
+ RTGCUINT vbox_errorcode)
+ {
+ return map_memory(_vm_session, gp_attach_addr, vbox_errorcode);
+ }
+
+ void _exit_config(Genode::Vm_state &state, unsigned exit)
+ {
+ switch (exit) {
+ case VMX_EXIT_TRIPLE_FAULT:
+ case VMX_EXIT_INIT_SIGNAL:
+ case VMX_EXIT_INT_WINDOW:
+ case VMX_EXIT_TASK_SWITCH:
+ case VMX_EXIT_CPUID:
+ case VMX_EXIT_HLT:
+ case VMX_EXIT_RDTSC:
+ case VMX_EXIT_RDTSCP:
+ case VMX_EXIT_VMCALL:
+ case VMX_EXIT_IO_INSTR:
+ case VMX_EXIT_RDMSR:
+ case VMX_EXIT_WRMSR:
+ case VMX_EXIT_ERR_INVALID_GUEST_STATE:
+// case VMX_EXIT_PAUSE:
+ case VMX_EXIT_WBINVD:
+ case VMX_EXIT_MOV_CRX:
+ case VMX_EXIT_MOV_DRX:
+ case VMX_EXIT_TPR_BELOW_THRESHOLD:
+ case VMX_EXIT_EPT_VIOLATION:
+ case VCPU_STARTUP:
+ case RECALL:
+ /* todo - touch all members */
+ Genode::memset(&state, ~0U, sizeof(state));
+ break;
+ default:
+ break;
+ }
+ }
+
+ public:
+
+ Vcpu_handler_vmx(Genode::Env &env, size_t stack_size,
+ Genode::Affinity::Location location,
+ unsigned int cpu_id,
+ Genode::Vm_connection &vm_session,
+ Genode::Allocator &alloc)
+ :
+ Vcpu_handler(env, stack_size, location, cpu_id),
+ _handler(_ep, *this, &Vcpu_handler_vmx::_handle_vm_exception,
+ &Vcpu_handler_vmx::_exit_config),
+ _vm_session(vm_session),
+ /* construct vcpu */
+ _vcpu(_vm_session.with_upgrade([&]() {
+ return _vm_session.create_vcpu(alloc, env, _handler); })),
+ /* get state of vcpu */
+ _state_ds(env.rm(), _vm_session.cpu_state(_vcpu))
+ {
+ _state = _state_ds.local_addr<Genode::Vm_state>();
+
+ /* sync with initial startup exception */
+ _lock.lock();
+
+ _vm_session.run(_vcpu);
+
+ /* sync with initial startup exception */
+ _lock.lock();
+// _lock.unlock();
+ }
+
+ bool hw_save_state(Genode::Vm_state *state, VM * pVM, PVMCPU pVCpu) {
+ return vmx_save_state(state, pVM, pVCpu);
+ }
+
+ bool hw_load_state(Genode::Vm_state * state, VM * pVM, PVMCPU pVCpu) {
+ return vmx_load_state(state, pVM, pVCpu);
+ }
+
+ int vm_exit_requires_instruction_emulation(PCPUMCTX pCtx)
+ {
+ switch (_state->exit_reason) {
+ case VMX_EXIT_HLT:
+ pCtx->rip++;
+ return VINF_EM_HALT;
+ case VMX_EXIT_IO_INSTR:
+ /* EMHandleRCTmpl.h does not distinguish READ/WRITE rc */
+ return VINF_IOM_R3_IOPORT_WRITE;
+ case VMX_EXIT_RDMSR:
+ return VINF_CPUM_R3_MSR_READ;
+ case VMX_EXIT_WRMSR:
+ return VINF_CPUM_R3_MSR_WRITE;
+ case VMX_EXIT_TPR_BELOW_THRESHOLD:
+ /* the instruction causing the exit has already been executed */
+ case RECALL:
+ return VINF_SUCCESS;
+ case VMX_EXIT_EPT_VIOLATION:
+ if (_ept_fault_addr_type == PGMPAGETYPE_MMIO)
+ /* EMHandleRCTmpl.h does not distinguish READ/WRITE rc */
+ return VINF_IOM_R3_MMIO_READ_WRITE;
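+ /* otherwise fall through - non-MMIO EPT violations get emulated */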
+ case VMX_EXIT_MOV_DRX:
+ /* looks complicated in original R0 code -> emulate instead */
+ return VINF_EM_RAW_EMULATE_INSTR;
+ default:
+ return VINF_EM_RAW_EMULATE_INSTR;
+ }
+ }
+};
+
+#endif /* _VIRTUALBOX__VCPU_VMX_H_ */
diff --git a/repos/ports/src/virtualbox5/vmx.h b/repos/ports/src/virtualbox5/vmx.h
new file mode 100644
index 000000000..3718e2493
--- /dev/null
+++ b/repos/ports/src/virtualbox5/vmx.h
@@ -0,0 +1,103 @@
+/*
+ * \brief Genode specific VirtualBox SUPLib supplements
+ * \author Norman Feske
+ * \author Alexander Boettcher
+ */
+
+/*
+ * Copyright (C) 2013-2019 Genode Labs GmbH
+ *
+ * This file is distributed under the terms of the GNU General Public License
+ * version 2.
+ */
+
+#ifndef _VIRTUALBOX__VMX_H_
+#define _VIRTUALBOX__VMX_H_
+
+#define GENODE_READ_SELREG_REQUIRED(REG) \
+ (pCtx->REG.Sel != state->REG.value().sel) || \
+ (pCtx->REG.ValidSel != state->REG.value().sel) || \
+ (pCtx->REG.fFlags != CPUMSELREG_FLAGS_VALID) || \
+ (pCtx->REG.u32Limit != state->REG.value().limit) || \
+ (pCtx->REG.u64Base != state->REG.value().base) || \
+ (pCtx->REG.Attr.u != sel_ar_conv_from_genode(state->REG.value().ar))
+
+#define GENODE_READ_SELREG(REG) \
+ pCtx->REG.Sel = state->REG.value().sel; \
+ pCtx->REG.ValidSel = state->REG.value().sel; \
+ pCtx->REG.fFlags = CPUMSELREG_FLAGS_VALID; \
+ pCtx->REG.u32Limit = state->REG.value().limit; \
+ pCtx->REG.u64Base = state->REG.value().base; \
+ pCtx->REG.Attr.u = sel_ar_conv_from_genode(state->REG.value().ar)
+
+static inline bool vmx_save_state(Genode::Vm_state * state, VM * pVM, PVMCPU pVCpu)
+{
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ GENODE_READ_SELREG(cs);
+ GENODE_READ_SELREG(ds);
+ GENODE_READ_SELREG(es);
+ GENODE_READ_SELREG(fs);
+ GENODE_READ_SELREG(gs);
+ GENODE_READ_SELREG(ss);
+
+ if (GENODE_READ_SELREG_REQUIRED(ldtr)) {
+ GENODE_READ_SELREG(ldtr);
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
+ }
+ if (GENODE_READ_SELREG_REQUIRED(tr)) {
+ GENODE_READ_SELREG(tr);
+ CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
+ }
+
+ return true;
+}
+
+#undef GENODE_READ_SELREG_REQUIRED
+#undef GENODE_READ_SELREG
+
+
+enum { VMCS_SEG_UNUSABLE = 0x10000 };
+
+#define GENODE_WRITE_SELREG(REG) \
+ Assert(pCtx->REG.fFlags & CPUMSELREG_FLAGS_VALID); \
+ Assert(pCtx->REG.ValidSel == pCtx->REG.Sel); \
+ state->REG.value(Segment{pCtx->REG.Sel, \
+ sel_ar_conv_to_genode(pCtx->REG.Attr.u ? : VMCS_SEG_UNUSABLE), \
+ pCtx->REG.u32Limit, \
+ pCtx->REG.u64Base});
+
+static inline bool vmx_load_state(Genode::Vm_state * state, VM * pVM, PVMCPU pVCpu)
+{
+ PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+
+ typedef Genode::Vm_state::Segment Segment;
+
+ GENODE_WRITE_SELREG(es);
+ GENODE_WRITE_SELREG(ds);
+
+ GENODE_WRITE_SELREG(fs);
+ GENODE_WRITE_SELREG(gs);
+
+ GENODE_WRITE_SELREG(cs);
+ GENODE_WRITE_SELREG(ss);
+
+ /* ldtr */
+ if (pCtx->ldtr.Sel == 0) {
+ state->ldtr.value(Segment{0, sel_ar_conv_to_genode(0x82), 0, 0});
+ } else {
+ state->ldtr.value(Segment{pCtx->ldtr.Sel,
+ sel_ar_conv_to_genode(pCtx->ldtr.Attr.u),
+ pCtx->ldtr.u32Limit, pCtx->ldtr.u64Base});
+ }
+
+ /* tr */
+ state->tr.value(Segment{pCtx->tr.Sel, sel_ar_conv_to_genode(pCtx->tr.Attr.u),
+ pCtx->tr.u32Limit, pCtx->tr.u64Base});
+
+ return true;
+}
+
+#undef GENODE_WRITE_SELREG
+
+#endif /* _VIRTUALBOX__VMX_H_ */
diff --git a/tool/autopilot.list b/tool/autopilot.list
index 74eaf8be0..7a595b9b8 100644
--- a/tool/autopilot.list
+++ b/tool/autopilot.list
@@ -65,6 +65,10 @@ vbox5_ubuntu_14_04_32
vbox5_ubuntu_14_04_64
vbox5_ubuntu_16_04_32
vbox5_ubuntu_16_04_64
+vbox5_vm_ubuntu_16_04_32
+vbox5_vm_ubuntu_16_04_64
+vbox5_vm_win7_32
+vbox5_vm_win7_64
vbox5_win10_64
vbox5_win7_32
vbox5_win7_64