diff --git a/scripts/run_integration_tests.sh b/scripts/run_integration_tests.sh
index d25862798..6382373b4 100755
--- a/scripts/run_integration_tests.sh
+++ b/scripts/run_integration_tests.sh
@@ -78,24 +78,40 @@ popd
 
 
 # Build custom kernel based on virtio-pmem and virtio-fs upstream patches
 VMLINUX_IMAGE="$WORKLOADS_DIR/vmlinux"
+VMLINUX_PVH_IMAGE="$WORKLOADS_DIR/vmlinux.pvh"
 BZIMAGE_IMAGE="$WORKLOADS_DIR/bzImage"
-LINUX_CUSTOM_DIR="linux-custom"
+LINUX_CUSTOM_DIR="$WORKLOADS_DIR/linux-custom"
 
-if [ ! -f "$VMLINUX_IMAGE" ]; then
+if [ ! -f "$VMLINUX_IMAGE" ] || [ ! -f "$VMLINUX_PVH_IMAGE" ]; then
     SRCDIR=$PWD
     pushd $WORKLOADS_DIR
     time git clone --depth 1 "https://github.com/cloud-hypervisor/linux.git" -b "virtio-fs-virtio-iommu-virtio-mem-5.6-rc4" $LINUX_CUSTOM_DIR
+    cp $SRCDIR/resources/linux-config $LINUX_CUSTOM_DIR/.config
+    popd
+fi
+
+if [ ! -f "$VMLINUX_IMAGE" ]; then
     pushd $LINUX_CUSTOM_DIR
-    cp $SRCDIR/resources/linux-config .config
+    scripts/config --disable "CONFIG_PVH"
     time make bzImage -j `nproc`
     cp vmlinux $VMLINUX_IMAGE || exit 1
     cp arch/x86/boot/bzImage $BZIMAGE_IMAGE || exit 1
     popd
-    rm -rf $LINUX_CUSTOM_DIR
+fi
+
+if [ ! -f "$VMLINUX_PVH_IMAGE" ]; then
+    pushd $LINUX_CUSTOM_DIR
+    scripts/config --enable "CONFIG_PVH"
+    time make bzImage -j `nproc`
+    cp vmlinux $VMLINUX_PVH_IMAGE || exit 1
     popd
 fi
 
+if [ -d "$LINUX_CUSTOM_DIR" ]; then
+    rm -rf $LINUX_CUSTOM_DIR
+fi
+
 VIRTIOFSD="$WORKLOADS_DIR/virtiofsd"
 QEMU_DIR="qemu_build"
 if [ ! -f "$VIRTIOFSD" ]; then
@@ -208,6 +224,19 @@ EOF
     RES=$?
 fi
 
+# Test the pvh_boot feature
+if [ $RES -eq 0 ]; then
+    cargo build --release --features "pvh_boot"
+    sudo setcap cap_net_admin+ep target/release/cloud-hypervisor
+
+    newgrp kvm << EOF
+export RUST_BACKTRACE=1
+time cargo test --features "integration_tests,pvh_boot" "$@" -- --nocapture
+EOF
+
+    RES=$?
+fi
+
 # Tear VFIO test network down
 sudo ip link del vfio-br0
 sudo ip link del vfio-tap0
diff --git a/tests/integration.rs b/tests/integration.rs
index 83e573221..b99c76e61 100644
--- a/tests/integration.rs
+++ b/tests/integration.rs
@@ -1052,6 +1052,51 @@ mod tests {
         });
     }
 
+    #[cfg_attr(feature = "pvh_boot", test)]
+    fn test_pvh_boot() {
+        test_block!(tb, "", {
+            let mut clear = ClearDiskConfig::new();
+            let guest = Guest::new(&mut clear);
+            let mut workload_path = dirs::home_dir().unwrap();
+            workload_path.push("workloads");
+
+            let mut kernel_path = workload_path;
+            kernel_path.push("vmlinux.pvh");
+
+            let mut child = GuestCommand::new(&guest)
+                .args(&["--cpus","boot=1"])
+                .args(&["--memory", "size=512M"])
+                .args(&["--kernel", kernel_path.to_str().unwrap()])
+                .default_disks()
+                .default_net()
+                .args(&["--cmdline", "root=PARTUUID=6fb4d1a8-6c8c-4dd7-9f7c-1fe0b9f2574c console=tty0 console=ttyS0,115200n8 console=hvc0 quiet init=/usr/lib/systemd/systemd-bootchart initcall_debug tsc=reliable no_timer_check noreplace-smp cryptomgr.notests rootfstype=ext4,btrfs,xfs kvm-intel.nested=1 rw"])
+                .spawn()
+                .unwrap();
+
+            thread::sleep(std::time::Duration::new(20, 0));
+
+            aver_eq!(tb, guest.get_cpu_count().unwrap_or_default(), 1);
+            aver!(tb, guest.get_total_memory().unwrap_or_default() > 496_000);
+            aver!(tb, guest.get_entropy().unwrap_or_default() >= 900);
+
+            #[cfg(not(feature = "mmio"))]
+            aver_eq!(
+                tb,
+                guest
+                    .ssh_command("grep -c PCI-MSI /proc/interrupts")
+                    .unwrap_or_default()
+                    .trim()
+                    .parse::<u32>()
+                    .unwrap_or_default(),
+                12
+            );
+
+            let _ = child.kill();
+            let _ = child.wait();
+            Ok(())
+        });
+    }
+
     #[test]
     fn test_bzimage_boot() {
         test_block!(tb, "", {