// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 SUSE LLC
 * Author: Nicolai Stange <[email protected]>
 * LTP port: Martin Doucha <[email protected]>
 */

.set KVM_TEXIT, 0xff
.set RESULT_ADDRESS, 0xfffff000
.set KVM_GDT_SIZE, 32

.set MSR_VM_HSAVE_PA, 0xc0010117

/*
 * This section will be allocated at address 0x1000 and
 * jumped to from the reset stub provided by kvm_run.
 */
.code16
.section .init.protected_mode, "ax"
real_mode_entry:
	cli

	lgdt kvm_gdt_desc

	mov $0x11, %eax
	mov %eax, %cr0

	jmp $1 * 8, $protected_mode_entry

.code32
protected_mode_entry:
	mov $2 * 8, %eax
	mov %eax, %ds
	mov %eax, %es
	jmp init_memlayout

.section .init.gdt32, "a", @progbits

.macro gdt32_entry type:req l=0 d=0 dpl=0 limit=0xfffff g=1 p=1
	.4byte \limit & 0xffff
	.2byte (\type << 8) | (\dpl << 13) | (\p << 15)
	.2byte (\limit >> 16) | (\l << 5) | (\d << 6) | (\g << 7)
.endm
.align 8
.global kvm_gdt
kvm_gdt:
	.8byte 0
	gdt32_entry type=0x1a l=0 d=1 /* Code segment protected_mode, 32bits */
	gdt32_entry type=0x12 /* Data segment, writable */
	.skip (KVM_GDT_SIZE-3)*8 /* Stack, TSS and other segment descriptors */

.Lgdt_end:
.global kvm_gdt_desc
kvm_gdt_desc:
	.2byte .Lgdt_end - kvm_gdt - 1
	.4byte kvm_gdt

.code32
.section .init.memlayout, "ax"
init_memlayout:
	/*
	 * Identity-map the first 2GB of virtual address space.
	 */
	lea kvm_pagetable, %edi
	lea kvm_pgtable_l2, %esi
	movl %esi, %eax
	mov $1024, %ecx

1:	movl %eax, %ebx
	orl $0x3, %ebx /* Flags: present, writable */
	movl %ebx, (%edi)
	addl $4, %edi
	addl $4096, %eax
	dec %ecx
	jnz 1b

	/* Fill kvm_pgtable_l2 with identity map of the first 2GB. */
	movl %esi, %edi
	movl $512 * 1024, %ecx
	xor %eax, %eax

1:	movl %eax, %ebx
	orl $0x3, %ebx /* Flags: present, writable */
	movl %ebx, (%edi)
	addl $4, %edi
	addl $4096, %eax
	dec %ecx
	jnz 1b

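	/*
	 * The two loops above set up plain 32-bit two-level paging:
	 * the page directory at kvm_pagetable points at the 1024
	 * consecutive page tables in kvm_pgtable_l2, and the first
	 * 512 * 1024 PTEs identity-map the low 2GB with the present
	 * and writable bits (0x3) set.
	 */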
	/* Mark the upper 2GB as unmapped except for the last page. */
	movl $512 * 1024 - 1, %ecx
	xor %eax, %eax
	rep stosl
	movl $0xfffff003, (%edi)

	/*
	 * Install new pagetable to CR3 and enable memory paging by setting
	 * CR0.WP and CR0.PG
	 */
	lea kvm_pagetable, %eax
	movl %eax, %cr3
	movl %cr0, %eax
	btsl $31, %eax
	btsl $16, %eax
	movl %eax, %cr0

	/* Init TSS */
	lea kvm_tss, %edx
	movl %edx, %edi
	movl $.Ltss_end - kvm_tss, %ecx
	xor %eax, %eax
	rep stosb
	movl %edx, %edi
	lea kvm_stack_top, %edx
	movl %edx, 4(%edi)

	/* Create a stack descriptor in the 4th GDT slot */
	/* Base address: 0x0, Limit: kvm_stack_bottom */
	xor %eax, %eax
	movl $0xc09600, %ebx /* flags + access bits */
	movl $kvm_stack_bottom - 1, %edx
	shr $12, %edx
	movw %dx, %ax
	andl $0xf0000, %edx
	orl %edx, %ebx

	lea kvm_gdt + 3*8, %edi
	mov %eax, (%edi)
	mov %ebx, 4(%edi)
	mov $3 * 8, %eax
	mov %ax, %ss
	lea kvm_stack_top, %esp

	/* Create a TSS descriptor in the 5th GDT slot */
	lea kvm_tss, %edx
	movl %edx, %ebx
	andl $0xff000000, %ebx
	movl %edx, %eax
	shr $16, %eax
	movb %al, %bl
	orl $0x408900, %ebx /* flags + access bits */

	movl %edx, %eax
	movl $.Ltss_end - kvm_tss - 1, %edx
	shl $16, %eax
	movw %dx, %ax
	andl $0xf0000, %edx
	orl %edx, %ebx

	lea kvm_gdt + 4*8, %edi
	mov %eax, (%edi)
	mov %ebx, 4(%edi)
	mov $4 * 8, %ax
	ltr %ax

	/* Configure and enable interrupts */
	call kvm_init_interrupts
	lidt kvm_idt_desc
	sti

	/*
	 * Do just enough of initialization to get to a working
	 * -ffreestanding environment and call tst_main(void).
	 */
	lea __preinit_array_start, %edi
	lea __preinit_array_end, %esi
1:
	cmp %edi, %esi
	je 2f
	call *(%edi)
	add $4, %edi
	jmp 1b
2:

	lea __init_array_start, %edi
	lea __init_array_end, %esi
1:
	cmp %edi, %esi
	je 2f
	call *(%edi)
	add $4, %edi
	jmp 1b
2:
	call main
	jmp kvm_exit

.global kvm_read_cregs
kvm_read_cregs:
	push %edi
	mov 8(%esp), %edi
	mov %cr0, %eax
	mov %eax, (%edi)
	mov %cr2, %eax
	mov %eax, 4(%edi)
	mov %cr3, %eax
	mov %eax, 8(%edi)
	mov %cr4, %eax
	mov %eax, 12(%edi)
	pop %edi
	ret

.global kvm_read_sregs
kvm_read_sregs:
	push %edi
	mov 8(%esp), %edi
	mov %cs, %ax
	movw %ax, (%edi)
	mov %ds, %ax
	movw %ax, 2(%edi)
	mov %es, %ax
	movw %ax, 4(%edi)
	mov %fs, %ax
	movw %ax, 6(%edi)
	mov %gs, %ax
	movw %ax, 8(%edi)
	mov %ss, %ax
	movw %ax, 10(%edi)
	pop %edi
	ret

handle_interrupt:
	/* save CPU state */
	push %ebp
	mov %esp, %ebp
	addl $12, %ebp
	pushal

	/* call handler */
	push -4(%ebp)
	push -8(%ebp)
	push %ebp
	cld
	call tst_handle_interrupt
	addl $12, %esp
	popal
	pop %ebp
	addl $8, %esp
	iret

.macro create_intr_handler vector:req padargs=0
.if \padargs
	pushl $0 /* push dummy error code */
.endif
	pushl $\vector
	jmp handle_interrupt
.endm

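/*
 * Exception handler stubs. Vectors for which the CPU does not push an
 * error code get a dummy one via padargs=1, so handle_interrupt always
 * sees the same stack layout: vector, error code, then the EIP/CS/EFLAGS
 * frame pushed by the CPU. It forwards these to
 * tst_handle_interrupt(frame pointer, vector, error code).
 */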
.global kvm_handle_zerodiv
kvm_handle_zerodiv:
	create_intr_handler 0, padargs=1

.global kvm_handle_debug
kvm_handle_debug:
	create_intr_handler 1, padargs=1

.global kvm_handle_nmi
kvm_handle_nmi:
	create_intr_handler 2, padargs=1

.global kvm_handle_breakpoint
kvm_handle_breakpoint:
	create_intr_handler 3, padargs=1

.global kvm_handle_overflow
kvm_handle_overflow:
	create_intr_handler 4, padargs=1

.global kvm_handle_bound_range_exc
kvm_handle_bound_range_exc:
	create_intr_handler 5, padargs=1

.global kvm_handle_bad_opcode
kvm_handle_bad_opcode:
	create_intr_handler 6, padargs=1

.global kvm_handle_device_error
kvm_handle_device_error:
	create_intr_handler 7, padargs=1

.global kvm_handle_double_fault
kvm_handle_double_fault:
	create_intr_handler 8

.global kvm_handle_invalid_tss
kvm_handle_invalid_tss:
	create_intr_handler 10

.global kvm_handle_segfault
kvm_handle_segfault:
	create_intr_handler 11

.global kvm_handle_stack_fault
kvm_handle_stack_fault:
	create_intr_handler 12

.global kvm_handle_gpf
kvm_handle_gpf:
	create_intr_handler 13

.global kvm_handle_page_fault
kvm_handle_page_fault:
	create_intr_handler 14

.global kvm_handle_fpu_error
kvm_handle_fpu_error:
	create_intr_handler 16, padargs=1

.global kvm_handle_alignment_error
kvm_handle_alignment_error:
	create_intr_handler 17

.global kvm_handle_machine_check
kvm_handle_machine_check:
	create_intr_handler 18, padargs=1

.global kvm_handle_simd_error
kvm_handle_simd_error:
	create_intr_handler 19, padargs=1

.global kvm_handle_virt_error
kvm_handle_virt_error:
	create_intr_handler 20, padargs=1

.global kvm_handle_cpe
kvm_handle_cpe:
	create_intr_handler 21

.global kvm_handle_hv_injection
kvm_handle_hv_injection:
	create_intr_handler 28, padargs=1

.global kvm_handle_vmm_comm
kvm_handle_vmm_comm:
	create_intr_handler 29

.global kvm_handle_security_error
kvm_handle_security_error:
	create_intr_handler 30

.global kvm_handle_bad_exception
kvm_handle_bad_exception:
	create_intr_handler -1, padargs=1

.global kvm_exit
kvm_exit:
	movl $RESULT_ADDRESS, %edi
	movl $KVM_TEXIT, (%edi)
	hlt
	jmp kvm_exit

.global kvm_yield
kvm_yield:
	hlt
	ret

.global kvm_svm_guest_entry
kvm_svm_guest_entry:
	call *%eax
1:	hlt
	jmp 1b

.global kvm_svm_vmrun
kvm_svm_vmrun:
	push %edi
	mov 8(%esp), %edi
	push %ebx
	push %esi
	push %ebp

	clgi

	/* Save full host state */
	movl $MSR_VM_HSAVE_PA, %ecx
	rdmsr
	vmsave
	push %eax

	/* Load guest registers */
	push %edi
	movl (%edi), %eax
	/* %eax is loaded by vmrun from VMCB */
	movl 0x0c(%edi), %ebx
	movl 0x14(%edi), %ecx
	movl 0x1c(%edi), %edx
	movl 0x2c(%edi), %esi
	movl 0x34(%edi), %ebp
	/* %esp is loaded by vmrun from VMCB */
	movl 0x24(%edi), %edi

	vmload
	vmrun
	vmsave

	/* Clear guest register buffer */
	push %edi
	push %ecx
	movl 8(%esp), %edi
	addl $4, %edi
	xorl %eax, %eax
	mov $32, %ecx
	pushfl
	cld
	rep stosl
	popfl

	/* Save guest registers */
	pop %ecx
	pop %eax
	pop %edi
	movl %ebx, 0x0c(%edi)
	movl %ecx, 0x14(%edi)
	movl %edx, 0x1c(%edi)
	movl %eax, 0x24(%edi)
	movl %esi, 0x2c(%edi)
	movl %ebp, 0x34(%edi)
	/* Copy %eax and %esp from VMCB */
	movl (%edi), %esi
	movl 0x5f8(%esi), %eax
	movl %eax, 0x04(%edi)
	movl 0x5d8(%esi), %eax
	movl %eax, 0x3c(%edi)

	pop %eax
	vmload
	stgi

	pop %ebp
	pop %esi
	pop %ebx
	pop %edi
	ret

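/*
 * Backing storage for the structures set up above: kvm_pagetable is the
 * 4KB page directory and kvm_pgtable_l2 holds the 1024 page tables (4MB)
 * it points at; kvm_stack_bottom/kvm_stack_top bound the 8KB stack whose
 * descriptor lands in the 4th GDT slot; kvm_tss backs the task register
 * loaded with ltr.
 */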
.section .bss.pgtables, "aw", @nobits
.global kvm_pagetable
kvm_pagetable:
	.skip 4096

kvm_pgtable_l2:
	.skip 1024 * 4096

.section .bss.stack, "aw", @nobits
.global kvm_stack_bottom
kvm_stack_bottom:
	.skip 2 * 4096
.global kvm_stack_top
kvm_stack_top:

.section .bss.tss
.global kvm_tss
kvm_tss:
	.skip 0x6C
.Ltss_end:

.section .bss
.align 8
.global kvm_idt
kvm_idt:
	.skip 8 * 256
.Lidt_end:

.section .data
.align 8
.global kvm_idt_desc
kvm_idt_desc:
	.2byte .Lidt_end - kvm_idt - 1
	.4byte kvm_idt