KubeVirt Virtual Machines on Kubernetes

KubeVirt extends Kubernetes with the ability to run virtual machines alongside containerized workloads, using the same Kubernetes APIs, scheduling, and networking infrastructure. This enables organizations to manage legacy VM workloads and modern containers in a unified platform, supporting live migration, VM snapshots, and seamless integration with Kubernetes storage and networking.

Prerequisites

  • Kubernetes 1.24+
  • Hardware virtualization support (VMX/SVM CPU flags)
  • Minimum 2 worker nodes for live migration
  • virtctl CLI tool installed
  • Sufficient RAM per node for VM workloads (VMs run inside pods)

Check hardware virtualization support:

# Verify CPU virtualization extensions (Intel VMX or AMD SVM).
# 'grep -E' replaces the deprecated 'egrep'; -c prints the match count.
grep -Ec '(vmx|svm)' /proc/cpuinfo

# Should return > 0
# If running in a VM, ensure nested virtualization is enabled on the hypervisor:
# KVM: modprobe kvm_intel nested=1 or kvm_amd nested=1

# Check Kubernetes nodes support virtualization
kubectl get nodes -o wide
for node in $(kubectl get nodes -o name); do
  echo "=== ${node} ==="
  # Quote the expansion; use ERE alternation ('|') instead of the GNU-only
  # BRE escape '\|' so the pattern also works with BSD grep.
  kubectl describe "${node}" | grep -iE 'cpu|kvm|virt'
done

Installing KubeVirt

# Get the latest stable release tag published by the KubeVirt project.
# -f makes curl fail on HTTP errors instead of capturing an error page.
export KUBEVIRT_VERSION=$(curl -fsS https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
# Abort with a clear message if the lookup failed and the variable is empty.
: "${KUBEVIRT_VERSION:?failed to determine latest KubeVirt version}"
echo "Installing KubeVirt ${KUBEVIRT_VERSION}"

# Deploy KubeVirt operator (manages the lifecycle of all KubeVirt components)
kubectl create -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml"

# Deploy KubeVirt custom resource (tells the operator to roll KubeVirt out)
kubectl create -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml"

# Wait for KubeVirt to report Available (all components deployed and healthy)
kubectl -n kubevirt wait kv kubevirt \
  --for condition=Available \
  --timeout=600s

# Verify all components are running
kubectl -n kubevirt get pods

# Install virtctl CLI.
# Without -f, a 404 from GitHub would be saved as the "binary"; -L follows
# the release-asset redirect.
curl -fsSL -o virtctl \
  "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/virtctl-${KUBEVIRT_VERSION}-linux-amd64"
chmod +x virtctl
sudo mv virtctl /usr/local/bin/

# Verify
virtctl version

Enable software emulation if hardware virtualization is unavailable (dev/test only):

# Flip the developer-only useEmulation switch on the KubeVirt CR so VMs run
# under QEMU software emulation when /dev/kvm is unavailable.
kubectl patch kubevirt kubevirt \
  --namespace kubevirt \
  --type=merge \
  --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'

Creating Virtual Machines

# Create a VM using a container disk (cloud image bundled as container).
# The cloud-init payload is supplied base64-encoded via userDataBase64.
cat > fedora-vm.yaml <<EOF
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: fedora-vm
  namespace: default
spec:
  running: false  # Set to true to start immediately
  template:
    metadata:
      labels:
        kubevirt.io/vm: fedora-vm
    spec:
      domain:
        cpu:
          cores: 2
          sockets: 1
          threads: 1
        memory:
          guest: 2Gi
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
            - name: cloudinitdisk
              disk:
                bus: virtio
          interfaces:
            - name: default
              masquerade: {}
          rng: {}
        resources:
          requests:
            memory: 2Gi
            cpu: "2"
      networks:
        - name: default
          pod: {}
      volumes:
        - name: containerdisk
          containerDisk:
            image: quay.io/kubevirt/fedora-cloud-container-disk-demo:latest
        - name: cloudinitdisk
          cloudInitNoCloud:
            # Decoded: cloud-config creating user 'fedora' with your SSH key.
            # Keep the value on a single line: with a '|' block scalar, any
            # extra line (even a comment) becomes part of the base64 payload
            # and corrupts the decode.
            userDataBase64: I2Nsb3VkLWNvbmZpZwp1c2VyczoKICAtIG5hbWU6IGZlZG9yYQogICAgc3VkbzogQUxMPShBTEwpIE5PUEFTU1dEOkFMTAogICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAgLSBzc2gtcnNhIFlPVVItUFVCTElDLUtFWQ==
EOF

kubectl apply -f fedora-vm.yaml

# Start the VM
virtctl start fedora-vm

# Stop the VM
virtctl stop fedora-vm

# Check VM status
kubectl get vm fedora-vm
kubectl get vmi fedora-vm  # VMI = running instance; exists only while powered on

# Connect to the VM console
virtctl console fedora-vm

# SSH via virtctl
virtctl ssh fedora@fedora-vm

Create a VM from a PVC (persistent disk):

# First, import a cloud image into a PVC using Containerized Data Importer (CDI).
# Install CDI.
# Resolve the latest release tag by reading the redirect target of
# /releases/latest: GitHub answers with a 302 whose Location ends in the tag.
# (Scraping the response body, as with 'grep -oP', is unreliable because the
# body is just a short redirect notice.)
export CDI_TAG=$(curl -s -w '%{redirect_url}' https://github.com/kubevirt/containerized-data-importer/releases/latest)
export CDI_VERSION=${CDI_TAG##*/}
: "${CDI_VERSION:?failed to determine latest CDI version}"
kubectl create -f "https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator.yaml"
kubectl create -f "https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-cr.yaml"

# Import Ubuntu cloud image
# CDI spins up an importer pod that downloads the image over HTTP and writes
# it into a new 20Gi PVC. 'storageClassName: longhorn' assumes Longhorn is
# installed -- substitute your cluster's storage class if different.
cat > ubuntu-datavolume.yaml <<EOF
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  name: ubuntu-2204-disk
  namespace: default
spec:
  source:
    http:
      url: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
  pvc:
    storageClassName: longhorn
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 20Gi
EOF

kubectl apply -f ubuntu-datavolume.yaml

# Watch import progress
# The DataVolume phase advances to Succeeded when the import completes;
# -w streams updates until interrupted.
kubectl get datavolume ubuntu-2204-disk -w

# Create VM using the imported disk
# rootdisk references the 'ubuntu-2204-disk' DataVolume imported above;
# cloud-init (plain-text userData this time) creates the 'ubuntu' user and
# installs nginx on first boot. Replace 'ssh-rsa YOUR-PUBLIC-KEY' with a real
# public key. 'running: true' starts the VM as soon as it is created.
cat > ubuntu-vm.yaml <<EOF
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: ubuntu-prod-01
spec:
  running: true
  template:
    spec:
      domain:
        cpu:
          cores: 4
        memory:
          guest: 8Gi
        devices:
          disks:
            - name: rootdisk
              disk:
                bus: virtio
            - name: cloudinit
              disk:
                bus: virtio
          interfaces:
            - name: default
              masquerade: {}
      networks:
        - name: default
          pod: {}
      volumes:
        - name: rootdisk
          dataVolume:
            name: ubuntu-2204-disk
        - name: cloudinit
          cloudInitNoCloud:
            userData: |
              #cloud-config
              users:
                - name: ubuntu
                  sudo: ALL=(ALL) NOPASSWD:ALL
                  ssh_authorized_keys:
                    - ssh-rsa YOUR-PUBLIC-KEY
              package_update: true
              packages:
                - nginx
EOF

kubectl apply -f ubuntu-vm.yaml

VM Networking

Configure advanced networking for VMs:

# Install Multus CNI for multiple network interfaces
# Multus is a meta-CNI that lets a pod (and therefore a KubeVirt VM) attach
# to additional networks beyond the default pod network.
kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml

# Create a NetworkAttachmentDefinition for bridge networking
# The embedded CNI config attaches workloads to Linux bridge 'br0' on the
# node and assigns addresses from 192.168.100.10-200 via host-local IPAM.
# NOTE(review): host-local IPAM allocates per node, so identical IPs can be
# handed out on different nodes -- confirm this fits your topology, and that
# 'br0' exists on every node.
cat > bridge-network.yaml <<EOF
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: bridge-network
  namespace: default
spec:
  config: |
    {
      "cniVersion": "0.3.1",
      "name": "bridge-network",
      "type": "bridge",
      "bridge": "br0",
      "isGateway": true,
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.100.0/24",
        "rangeStart": "192.168.100.10",
        "rangeEnd": "192.168.100.200",
        "gateway": "192.168.100.1"
      }
    }
EOF

kubectl apply -f bridge-network.yaml

# Create a VM with multiple NICs
# First interface uses the pod network through masquerade (NAT); the second
# is bridged onto 'bridge-network' defined above, so the guest gets an
# address on the 192.168.100.0/24 segment.
cat > multi-nic-vm.yaml <<EOF
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: multi-nic-vm
spec:
  running: true
  template:
    spec:
      domain:
        devices:
          interfaces:
            - name: default
              masquerade: {}  # Pod network (NAT)
            - name: bridge-net
              bridge: {}       # Direct bridge access
        memory:
          guest: 2Gi
      networks:
        - name: default
          pod: {}
        - name: bridge-net
          multus:
            networkName: bridge-network
      volumes:
        - name: disk
          containerDisk:
            image: quay.io/kubevirt/cirros-container-disk-demo:latest
EOF

kubectl apply -f multi-nic-vm.yaml

# Expose a VM service (SSH access)
# Creates a NodePort Service in front of the VM so port 22 is reachable
# from outside the cluster.
virtctl expose vm fedora-vm \
  --name fedora-vm-ssh \
  --port 22 \
  --type NodePort

Storage for VMs

# Add a data disk to a running VM
# Create a standalone 100Gi PVC that will be attached to the VM below.
cat > data-disk-pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vm-data-disk
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 100Gi
EOF

kubectl apply -f data-disk-pvc.yaml

# Hotplug the disk to a running VM
# NOTE(review): confirm the '--hotplug' flag against your virtctl version --
# recent releases hotplug by default and use '--persist' to keep the disk
# attached across VM restarts instead.
virtctl addvolume ubuntu-prod-01 \
  --volume-name=extra-storage \
  --claim-name=vm-data-disk \
  --hotplug

# Remove hotplugged disk
# --volume-name must match the name used in addvolume above.
virtctl removevolume ubuntu-prod-01 \
  --volume-name=extra-storage

# Take a VM snapshot (requires snapshot feature gate)
# NOTE(review): 'snapshot.kubevirt.io/v1alpha1' may be served as v1beta1 on
# newer KubeVirt releases -- check 'kubectl api-resources' for the group.
# Snapshots also need a CSI driver with VolumeSnapshot support.
cat > vm-snapshot.yaml <<EOF
apiVersion: snapshot.kubevirt.io/v1alpha1
kind: VirtualMachineSnapshot
metadata:
  name: ubuntu-snapshot-01
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: ubuntu-prod-01
EOF

kubectl apply -f vm-snapshot.yaml
kubectl get vmsnapshot ubuntu-snapshot-01 -w

Live Migration

Migrate VMs between nodes without downtime:

# Enable live migration in KubeVirt config
# Migration tuning knobs (see the KubeVirt migration documentation):
#   bandwidthPerMigration             - per-migration bandwidth cap
#   completionTimeoutPerGiB           - abort threshold scaled by VM size
#   parallelMigrationsPerCluster      - cluster-wide concurrency limit
#   parallelOutboundMigrationsPerNode - per-node outbound limit
#   progressTimeout                   - abort if no progress for this long (s)
#   allowAutoConverge                 - throttle guest CPU so copy can finish
kubectl -n kubevirt patch kubevirt kubevirt \
  --type merge \
  --patch '{
    "spec": {
      "configuration": {
        "migrations": {
          "bandwidthPerMigration": "64Mi",
          "completionTimeoutPerGiB": 800,
          "parallelMigrationsPerCluster": 5,
          "parallelOutboundMigrationsPerNode": 2,
          "progressTimeout": 150,
          "allowAutoConverge": true
        }
      }
    }
  }'

# Trigger a live migration
# Creating a VirtualMachineInstanceMigration object asks KubeVirt to move
# the named VMI to another schedulable node without stopping the guest.
cat > migration.yaml <<EOF
apiVersion: kubevirt.io/v1
kind: VirtualMachineInstanceMigration
metadata:
  name: migration-ubuntu-01
spec:
  vmiName: ubuntu-prod-01
EOF

kubectl apply -f migration.yaml

# Watch migration progress
kubectl get vmim migration-ubuntu-01 -w

# Check which node the VM migrated to
kubectl get vmi ubuntu-prod-01 -o jsonpath='{.status.nodeName}'

# Drain a node (triggers live migration for all VMs)
# --pod-selector limits the drain to virt-launcher pods. NOTE(review): VMs
# are evacuated by live migration only if they are migratable (e.g. shared
# RWX storage and a LiveMigrate eviction strategy) -- confirm before relying
# on this for zero-downtime maintenance.
kubectl drain worker-01 \
  --delete-emptydir-data \
  --ignore-daemonsets \
  --pod-selector=kubevirt.io=virt-launcher

Hybrid VM and Container Workloads

# Service that spans both VMs and containers
# Kubernetes Services select endpoints purely by label. The virt-launcher
# pod wrapping a VM carries the VM template's labels, so a VM labeled
# 'app: backend' and ordinary container pods with the same label all become
# endpoints of one Service.
cat > hybrid-service.yaml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: app-backend
spec:
  selector:
    app: backend  # Selects both VM pods and container pods
  ports:
    - port: 8080
      targetPort: 8080
---
# Label the VM to be included in the service
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: legacy-backend
spec:
  running: true
  template:
    metadata:
      labels:
        app: backend      # Matches service selector
    spec:
      domain:
        devices:
          interfaces:
            - name: default
              masquerade: {}
        memory:
          guest: 4Gi
      networks:
        - name: default
          pod: {}
      volumes:
        - name: disk
          dataVolume:
            name: legacy-backend-disk
EOF

kubectl apply -f hybrid-service.yaml

# Traffic will be distributed between VMs and containers with the same label

Troubleshooting

VM stuck in Scheduling state:

# Check if hardware virtualization is available on nodes
# Look in the allocatable list for a KVM device resource (presumably
# 'devices.kubevirt.io/kvm' -- confirm for your KubeVirt version); without it
# non-emulated VMs cannot be scheduled on the node.
kubectl get nodes -o custom-columns='NAME:.metadata.name,ALLOCATABLE:.status.allocatable'

# Verify KVM device plugin is running
# virt-handler is KubeVirt's per-node agent (DaemonSet); one pod per node.
kubectl -n kubevirt get pods -l kubevirt.io=virt-handler

# Check node labels
kubectl describe node worker-01 | grep kubevirt

VM fails to start:

# Check VMI events
kubectl describe vmi fedora-vm

# View virt-launcher pod logs
# virt-launcher is the pod that hosts the qemu/libvirt process for a VM.
kubectl logs -l kubevirt.io=virt-launcher -n default

# Check libvirt log inside the launcher pod
# NOTE(review): -t allocates a TTY and requires an interactive session; drop
# it when running from a script. If several pods match the label selector,
# the inner 'get pod -o name' returns multiple names and exec will fail --
# narrow the selector as needed.
kubectl exec -it $(kubectl get pod -l vm.kubevirt.io/name=fedora-vm -o name) -- cat /var/log/libvirt/qemu/*.log

Live migration fails:

# Check migration status
kubectl describe vmim migration-ubuntu-01

# Verify shared storage between nodes (required for migration)
# Live migration of disk-backed VMs needs volumes both nodes can attach,
# i.e. ReadWriteMany access mode.
kubectl get pvc <vm-pvc-name> -o yaml | grep accessModes

# Check network connectivity between nodes on migration port
# Default migration uses port 49152-49262
# NOTE(review): verify the exact port range against the KubeVirt/libvirt
# versions in use before writing firewall rules from it.

Console connection refused:

# Verify virt-api is running
# virt-api serves the subresource endpoints (console/VNC/SSH) that virtctl
# connects through, so console access fails when it is down.
kubectl -n kubevirt get pods -l kubevirt.io=virt-api

# Check VNC ports
kubectl -n kubevirt get svc

Conclusion

KubeVirt bridges the gap between traditional VM operations and cloud-native Kubernetes infrastructure, allowing teams to run legacy virtual machine workloads alongside containers with unified scheduling, networking, and storage. Live migration, VM snapshots, and hybrid service discovery make it a practical choice for organizations modernizing their infrastructure incrementally without forcing a complete rewrite of VM-based workloads.