Lines Matching +full:len +full:- +full:or +full:- +full:define

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
12 #define INSN_OPCODE_MASK 0x007c
13 #define INSN_OPCODE_SHIFT 2
14 #define INSN_OPCODE_SYSTEM 28
16 #define INSN_MASK_WFI 0xffffffff
17 #define INSN_MATCH_WFI 0x10500073
19 #define INSN_MASK_WRS 0xffffffff
20 #define INSN_MATCH_WRS 0x00d00073
22 #define INSN_MATCH_CSRRW 0x1073
23 #define INSN_MASK_CSRRW 0x707f
24 #define INSN_MATCH_CSRRS 0x2073
25 #define INSN_MASK_CSRRS 0x707f
26 #define INSN_MATCH_CSRRC 0x3073
27 #define INSN_MASK_CSRRC 0x707f
28 #define INSN_MATCH_CSRRWI 0x5073
29 #define INSN_MASK_CSRRWI 0x707f
30 #define INSN_MATCH_CSRRSI 0x6073
31 #define INSN_MASK_CSRRSI 0x707f
32 #define INSN_MATCH_CSRRCI 0x7073
33 #define INSN_MASK_CSRRCI 0x707f
35 #define INSN_MATCH_LB 0x3
36 #define INSN_MASK_LB 0x707f
37 #define INSN_MATCH_LH 0x1003
38 #define INSN_MASK_LH 0x707f
39 #define INSN_MATCH_LW 0x2003
40 #define INSN_MASK_LW 0x707f
41 #define INSN_MATCH_LD 0x3003
42 #define INSN_MASK_LD 0x707f
43 #define INSN_MATCH_LBU 0x4003
44 #define INSN_MASK_LBU 0x707f
45 #define INSN_MATCH_LHU 0x5003
46 #define INSN_MASK_LHU 0x707f
47 #define INSN_MATCH_LWU 0x6003
48 #define INSN_MASK_LWU 0x707f
49 #define INSN_MATCH_SB 0x23
50 #define INSN_MASK_SB 0x707f
51 #define INSN_MATCH_SH 0x1023
52 #define INSN_MASK_SH 0x707f
53 #define INSN_MATCH_SW 0x2023
54 #define INSN_MASK_SW 0x707f
55 #define INSN_MATCH_SD 0x3023
56 #define INSN_MASK_SD 0x707f
58 #define INSN_MATCH_C_LD 0x6000
59 #define INSN_MASK_C_LD 0xe003
60 #define INSN_MATCH_C_SD 0xe000
61 #define INSN_MASK_C_SD 0xe003
62 #define INSN_MATCH_C_LW 0x4000
63 #define INSN_MASK_C_LW 0xe003
64 #define INSN_MATCH_C_SW 0xc000
65 #define INSN_MASK_C_SW 0xe003
66 #define INSN_MATCH_C_LDSP 0x6002
67 #define INSN_MASK_C_LDSP 0xe003
68 #define INSN_MATCH_C_SDSP 0xe002
69 #define INSN_MASK_C_SDSP 0xe003
70 #define INSN_MATCH_C_LWSP 0x4002
71 #define INSN_MASK_C_LWSP 0xe003
72 #define INSN_MATCH_C_SWSP 0xc002
73 #define INSN_MASK_C_SWSP 0xe003
75 #define INSN_16BIT_MASK 0x3
77 #define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
79 #define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
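
A quick aside on the two macros above: RISC-V keeps the instruction length in the lowest two bits of the encoding, and only full 32-bit instructions have both bits set, so anything else is treated here as a 16-bit compressed (RVC) instruction. A minimal standalone sketch built only from these defines (the example encodings are illustrative, not taken from the file):

#include <stdio.h>

#define INSN_16BIT_MASK     0x3
#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
#define INSN_LEN(insn)      (INSN_IS_16BIT(insn) ? 2 : 4)

int main(void)
{
        unsigned long c_ld = 0x6398;     /* c.ld a4,0(a5): low bits 0b00, compressed */
        unsigned long ld   = 0x0007b703; /* ld a4,0(a5):   low bits 0b11, full width */

        printf("INSN_LEN(c.ld) = %d\n", INSN_LEN(c_ld)); /* 2 */
        printf("INSN_LEN(ld)   = %d\n", INSN_LEN(ld));   /* 4 */
        return 0;
}
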
82 #define LOG_REGBYTES 3
84 #define LOG_REGBYTES 2
86 #define REGBYTES (1 << LOG_REGBYTES)
88 #define SH_RD 7
89 #define SH_RS1 15
90 #define SH_RS2 20
91 #define SH_RS2C 2
92 #define MASK_RX 0x1f
94 #define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
95 #define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
98 #define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
100 #define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
103 #define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
106 #define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
108 #define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
110 #define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
111 #define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
112 #define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
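
RV_X() is a plain bit-field extractor (n bits starting at bit s), and RVC_RS1S()/RVC_RS2S() add 8 because compressed loads and stores encode their register operands in 3-bit fields that can only name x8..x15; the same bit positions hold rd' in c.lw/c.ld and rs2' in c.sw/c.sd, which is why one pair of macros serves both. A small sketch using only the macros listed above (the c.lw encoding is an illustrative choice, not from the file):

#include <stdio.h>

#define SH_RD           7
#define SH_RS2C         2
#define RV_X(x, s, n)   (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_RS1S(insn)  (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)  (8 + RV_X(insn, SH_RS2C, 3))

int main(void)
{
        unsigned long insn = 0x4398;    /* c.lw a4,0(a5) */

        /* bits 9:7 hold rs1', bits 4:2 hold rd'/rs2'; both can only name x8..x15 */
        printf("rs1 = x%lu\n", RVC_RS1S(insn));  /* x15 (a5) */
        printf("rd  = x%lu\n", RVC_RS2S(insn));  /* x14 (a4) */
        return 0;
}
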
114 #define SHIFT_RIGHT(x, y) \
115 ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
117 #define REG_MASK \
118 ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
120 #define REG_OFFSET(insn, pos) \
121 (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
123 #define REG_PTR(insn, pos, regs) \
126 #define GET_FUNCT3(insn) (((insn) >> 12) & 7)
128 #define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
129 #define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
130 #define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
131 #define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
132 #define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
133 #define GET_SP(regs) (*REG_PTR(2, 0, regs))
134 #define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
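
The accessor macros above avoid an explicit multiply: REG_OFFSET() shifts the instruction so the 5-bit register field lands already scaled by REGBYTES, then masks it, giving the byte offset of that GPR inside the saved context; REG_PTR() (whose continuation line is not part of this match list, but which in the source adds that offset to the context pointer) turns it into an lvalue. A standalone sketch, assuming only that the 32 GPRs sit contiguously in x-register order, as they do at the start of struct kvm_cpu_context:

#include <stdio.h>

typedef unsigned long ulong;

#define LOG_REGBYTES            3       /* 64-bit build */
#define SH_RD                   7
#define SH_RS1                  15
#define SHIFT_RIGHT(x, y)       ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_MASK                ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos)   (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) ((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
#define GET_RS1(insn, regs)     (*REG_PTR(insn, SH_RS1, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))

int main(void)
{
        ulong gprs[32] = { 0 };         /* stand-in for the GPR block of kvm_cpu_context */
        ulong insn = 0x0007b703;        /* ld a4,0(a5): rs1 = x15, rd = x14 */

        gprs[15] = 0x1234;
        SET_RD(insn, gprs, GET_RS1(insn, gprs) + 1);
        printf("x14 = 0x%lx\n", gprs[14]);      /* 0x1235 */
        return 0;
}
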
135 #define IMM_I(insn) ((s32)(insn) >> 20)
136 #define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
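
IMM_I() leans on an arithmetic right shift of the signed 32-bit instruction word, so the I-type immediate comes back already sign-extended; IMM_S() (its continuation line is likewise not in this match list) stitches the split S-type immediate together the same way. Right-shifting a negative signed value is formally implementation-defined in C, but the kernel relies on the arithmetic-shift behaviour of its supported compilers. A quick check of IMM_I with an illustrative encoding:

#include <stdio.h>
#include <stdint.h>

typedef int32_t s32;

#define IMM_I(insn)     ((s32)(insn) >> 20)

int main(void)
{
        uint32_t insn = 0xffc50513;     /* addi a0,a0,-4: imm[11:0] = 0xffc */

        printf("IMM_I = %d\n", IMM_I(insn));    /* -4 */
        return 0;
}
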
145 * 2) Returns 0 for exit to user-space
162 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_illegal_insn()
178 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_virtual_insn()
189 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
204 vcpu->stat.wfi_exit_stat++; in wfi_insn()
211 vcpu->stat.wrs_exit_stat++; in wrs_insn()
212 kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP); in wrs_insn()
232 if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR)) in seed_csr_rmw()
245 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
246 * emulation or in-kernel emulation
257 if (vcpu->arch.csr_decode.return_handled) in kvm_riscv_vcpu_csr_return()
259 vcpu->arch.csr_decode.return_handled = 1; in kvm_riscv_vcpu_csr_return()
262 insn = vcpu->arch.csr_decode.insn; in kvm_riscv_vcpu_csr_return()
264 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_csr_return()
265 run->riscv_csr.ret_value); in kvm_riscv_vcpu_csr_return()
268 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in kvm_riscv_vcpu_csr_return()
278 ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context); in csr_insn()
285 wr_mask = -1UL; in csr_insn()
290 new_val = -1UL; in csr_insn()
297 wr_mask = -1UL; in csr_insn()
302 new_val = -1UL; in csr_insn()
313 vcpu->arch.csr_decode.insn = insn; in csr_insn()
314 vcpu->arch.csr_decode.return_handled = 0; in csr_insn()
317 run->riscv_csr.csr_num = csr_num; in csr_insn()
318 run->riscv_csr.new_value = new_val; in csr_insn()
319 run->riscv_csr.write_mask = wr_mask; in csr_insn()
320 run->riscv_csr.ret_value = 0; in csr_insn()
322 /* Find in-kernel CSR function */ in csr_insn()
325 if ((tcfn->base <= csr_num) && in csr_insn()
326 (csr_num < (tcfn->base + tcfn->count))) { in csr_insn()
332 /* First try in-kernel CSR emulation */ in csr_insn()
333 if (cfn && cfn->func) { in csr_insn()
334 rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask); in csr_insn()
337 run->riscv_csr.ret_value = val; in csr_insn()
338 vcpu->stat.csr_exit_kernel++; in csr_insn()
346 /* Exit to user-space for CSR emulation */ in csr_insn()
348 vcpu->stat.csr_exit_user++; in csr_insn()
349 run->exit_reason = KVM_EXIT_RISCV_CSR; in csr_insn()
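
When no in-kernel handler claims the CSR, csr_insn() stages csr_num/new_value/write_mask in the run structure (lines 317-320 above) and exits with KVM_EXIT_RISCV_CSR; on the next KVM_RUN, kvm_riscv_vcpu_csr_return() (lines 245-268) copies ret_value into rd and advances sepc. A rough sketch of the user-space half of that contract; vmm_csr_read()/vmm_csr_write() are hypothetical VMM helpers, and the riscv_csr member of struct kvm_run is the one from the RISC-V UAPI header:

#include <linux/kvm.h>

/* Hypothetical VMM-side CSR model; these names are placeholders, not kernel API. */
extern unsigned long vmm_csr_read(unsigned long csr_num);
extern void vmm_csr_write(unsigned long csr_num, unsigned long val);

static void handle_riscv_csr_exit(struct kvm_run *run)
{
        unsigned long old, wr_mask = run->riscv_csr.write_mask;

        old = vmm_csr_read(run->riscv_csr.csr_num);
        if (wr_mask)
                vmm_csr_write(run->riscv_csr.csr_num,
                              (old & ~wr_mask) | (run->riscv_csr.new_value & wr_mask));

        /* Copied into rd by kvm_riscv_vcpu_csr_return() on the next KVM_RUN. */
        run->riscv_csr.ret_value = old;
}
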
406 if ((insn & ifn->mask) == ifn->match) { in system_opcode_insn()
407 rc = ifn->func(vcpu, run, insn); in system_opcode_insn()
418 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in system_opcode_insn()
428 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
434 * Returns > 0 to continue run-loop
435 * Returns 0 to exit run-loop and handle in user-space.
436 * Returns < 0 to report failure and exit run-loop
441 unsigned long insn = trap->stval; in kvm_riscv_vcpu_virtual_insn()
447 ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_virtual_insn()
449 ct->sepc, in kvm_riscv_vcpu_virtual_insn()
452 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_virtual_insn()
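
kvm_riscv_vcpu_virtual_insn() starts from trap->stval (line 441) and falls back to kvm_riscv_vcpu_unpriv_read() at sepc (line 449) when stval does not carry a usable encoding, then dispatches on the major opcode. A sketch of that opcode check using only the defines at the top of the file; the elided dispatch lines themselves are not quoted here:

#define INSN_OPCODE_MASK        0x007c
#define INSN_OPCODE_SHIFT       2
#define INSN_OPCODE_SYSTEM      28      /* 0x73 >> 2: ecall/ebreak/CSR/WFI group */

static int dispatch(unsigned long insn)
{
        switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
        case INSN_OPCODE_SYSTEM:
                return 1;       /* would hand off to system_opcode_insn() here */
        default:
                return 0;       /* treated as a truly illegal/virtual instruction */
        }
}
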
470 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
477 * Returns > 0 to continue run-loop
478 * Returns 0 to exit run-loop and handle in user-space.
479 * Returns < 0 to report failure and exit run-loop
487 int shift = 0, len = 0, insn_len = 0; in kvm_riscv_vcpu_mmio_load() local
489 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_load()
495 * transformed instruction or custom instruction. in kvm_riscv_vcpu_mmio_load()
502 * zero or special value. in kvm_riscv_vcpu_mmio_load()
504 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_load()
508 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_load()
517 len = 4; in kvm_riscv_vcpu_mmio_load()
518 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
520 len = 1; in kvm_riscv_vcpu_mmio_load()
521 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
523 len = 1; in kvm_riscv_vcpu_mmio_load()
524 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
527 len = 8; in kvm_riscv_vcpu_mmio_load()
528 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
530 len = 4; in kvm_riscv_vcpu_mmio_load()
533 len = 2; in kvm_riscv_vcpu_mmio_load()
534 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
536 len = 2; in kvm_riscv_vcpu_mmio_load()
539 len = 8; in kvm_riscv_vcpu_mmio_load()
540 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
544 len = 8; in kvm_riscv_vcpu_mmio_load()
545 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
548 len = 4; in kvm_riscv_vcpu_mmio_load()
549 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
553 len = 4; in kvm_riscv_vcpu_mmio_load()
554 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
556 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_load()
560 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_load()
561 return -EIO; in kvm_riscv_vcpu_mmio_load()
564 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_load()
565 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_load()
566 vcpu->arch.mmio_decode.shift = shift; in kvm_riscv_vcpu_mmio_load()
567 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_load()
568 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_load()
571 run->mmio.is_write = false; in kvm_riscv_vcpu_mmio_load()
572 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_load()
573 run->mmio.len = len; in kvm_riscv_vcpu_mmio_load()
576 if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) { in kvm_riscv_vcpu_mmio_load()
578 memcpy(run->mmio.data, data_buf, len); in kvm_riscv_vcpu_mmio_load()
579 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_load()
585 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_load()
586 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_load()
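
If kvm_io_bus_read() finds no in-kernel device, the load leaves the vCPU loop as an ordinary KVM_EXIT_MMIO with is_write == false; user space fills run->mmio.data and the next KVM_RUN lets kvm_riscv_vcpu_mmio_return() (lines 738-779 below) fold the bytes into rd. A minimal sketch of that user-space step; my_device_read() is a placeholder device model and a little-endian host is assumed:

#include <string.h>
#include <linux/kvm.h>

/* Hypothetical device-model read helper. */
extern unsigned long my_device_read(unsigned long addr, unsigned int len);

static void handle_mmio_read_exit(struct kvm_run *run)
{
        unsigned long val = my_device_read(run->mmio.phys_addr, run->mmio.len);

        /* The low run->mmio.len bytes are what mmio_return feeds into rd. */
        memcpy(run->mmio.data, &val, run->mmio.len);
}
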
592 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
599 * Returns > 0 to continue run-loop
600 * Returns 0 to exit run-loop and handle in user-space.
601 * Returns < 0 to report failure and exit run-loop
613 int len = 0, insn_len = 0; in kvm_riscv_vcpu_mmio_store() local
615 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_store()
621 * transformed instruction or custom instruction. in kvm_riscv_vcpu_mmio_store()
628 * zero or special value. in kvm_riscv_vcpu_mmio_store()
630 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_store()
634 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_store()
641 data = GET_RS2(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
645 len = 4; in kvm_riscv_vcpu_mmio_store()
647 len = 1; in kvm_riscv_vcpu_mmio_store()
650 len = 8; in kvm_riscv_vcpu_mmio_store()
653 len = 2; in kvm_riscv_vcpu_mmio_store()
656 len = 8; in kvm_riscv_vcpu_mmio_store()
657 data64 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
660 len = 8; in kvm_riscv_vcpu_mmio_store()
661 data64 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
664 len = 4; in kvm_riscv_vcpu_mmio_store()
665 data32 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
668 len = 4; in kvm_riscv_vcpu_mmio_store()
669 data32 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
671 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
675 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_store()
676 return -EIO; in kvm_riscv_vcpu_mmio_store()
679 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_store()
680 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_store()
681 vcpu->arch.mmio_decode.shift = 0; in kvm_riscv_vcpu_mmio_store()
682 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_store()
683 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_store()
686 switch (len) { in kvm_riscv_vcpu_mmio_store()
688 *((u8 *)run->mmio.data) = data8; in kvm_riscv_vcpu_mmio_store()
691 *((u16 *)run->mmio.data) = data16; in kvm_riscv_vcpu_mmio_store()
694 *((u32 *)run->mmio.data) = data32; in kvm_riscv_vcpu_mmio_store()
697 *((u64 *)run->mmio.data) = data64; in kvm_riscv_vcpu_mmio_store()
700 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
704 run->mmio.is_write = true; in kvm_riscv_vcpu_mmio_store()
705 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_store()
706 run->mmio.len = len; in kvm_riscv_vcpu_mmio_store()
710 fault_addr, len, run->mmio.data)) { in kvm_riscv_vcpu_mmio_store()
712 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_store()
718 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_store()
719 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_store()
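
The store path is simpler for user space: the guest register value has already been staged into run->mmio.data (lines 686-697 above), so a KVM_EXIT_MMIO with is_write == true only needs the write performed, and kvm_riscv_vcpu_mmio_return() bails out early for writes (line 746 below) before advancing sepc. Continuing the read sketch above, with the same caveats and placeholder helpers:

/* Same includes as the read sketch; my_device_write() is again a placeholder. */
extern void my_device_write(unsigned long addr, unsigned long val, unsigned int len);

static void handle_mmio_write_exit(struct kvm_run *run)
{
        unsigned long val = 0;

        memcpy(&val, run->mmio.data, run->mmio.len);    /* staged by kvm_riscv_vcpu_mmio_store() */
        my_device_write(run->mmio.phys_addr, val, run->mmio.len);
}
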
725 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
726 * or in-kernel IO emulation
738 int len, shift; in kvm_riscv_vcpu_mmio_return() local
740 if (vcpu->arch.mmio_decode.return_handled) in kvm_riscv_vcpu_mmio_return()
743 vcpu->arch.mmio_decode.return_handled = 1; in kvm_riscv_vcpu_mmio_return()
744 insn = vcpu->arch.mmio_decode.insn; in kvm_riscv_vcpu_mmio_return()
746 if (run->mmio.is_write) in kvm_riscv_vcpu_mmio_return()
749 len = vcpu->arch.mmio_decode.len; in kvm_riscv_vcpu_mmio_return()
750 shift = vcpu->arch.mmio_decode.shift; in kvm_riscv_vcpu_mmio_return()
752 switch (len) { in kvm_riscv_vcpu_mmio_return()
754 data8 = *((u8 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
755 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
759 data16 = *((u16 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
760 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
764 data32 = *((u32 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
765 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
769 data64 = *((u64 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
770 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
774 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_return()
779 vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len; in kvm_riscv_vcpu_mmio_return()
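
One last detail of the load/return pair: the shift saved at decode time is 8 * (sizeof(ulong) - len), and mmio_return applies a matching << shift >> shift to the bytes read back. On a signed 64-bit value that shift pair sign-extends the low len bytes, which is the classic idiom for emulating LB/LH/LW on RV64; on an unsigned value it zero-extends instead. An isolated illustration of the idiom, not a quote of the elided lines:

#include <stdio.h>

int main(void)
{
        int len = 1;                                    /* emulating an LB */
        int shift = 8 * (int)(sizeof(unsigned long) - len);
        unsigned long raw = 0xf0;                       /* byte read from MMIO */

        /* Arithmetic right shift of a signed value replicates the sign bit. */
        long sext = (long)(raw << shift) >> shift;
        unsigned long zext = raw << shift >> shift;     /* unsigned: zero-extends */

        printf("sign-extended: %ld\n", sext);           /* -16 */
        printf("zero-extended: %lu\n", zext);           /* 240 */
        return 0;
}
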