// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! x86 architecture gdb debugging support.

use gdbstub_arch::x86::reg::id::X86_64CoreRegId;
use gdbstub_arch::x86::reg::X86SegmentRegs;
use gdbstub_arch::x86::reg::X86_64CoreRegs;
use gdbstub_arch::x86::reg::X87FpuInternalRegs;
use hypervisor::x86_64::Regs;
use hypervisor::x86_64::Sregs;
use hypervisor::VcpuX86_64;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::Error;
use crate::Result;
use crate::X8664arch;

impl<T: VcpuX86_64> arch::GdbOps<T> for X8664arch {
    type Error = Error;

    fn read_registers(vcpu: &T) -> Result<X86_64CoreRegs> {
        // General registers: RAX, RBX, RCX, RDX, RSI, RDI, RBP, RSP, r8-r15
        let gregs = vcpu.get_regs().map_err(Error::ReadRegs)?;
        let regs = [
            gregs.rax, gregs.rbx, gregs.rcx, gregs.rdx, gregs.rsi, gregs.rdi, gregs.rbp, gregs.rsp,
            gregs.r8, gregs.r9, gregs.r10, gregs.r11, gregs.r12, gregs.r13, gregs.r14, gregs.r15,
        ];

        // GDB exposes 32-bit eflags instead of 64-bit rflags.
        // https://github.com/bminor/binutils-gdb/blob/master/gdb/features/i386/64bit-core.xml
        let eflags = gregs.rflags as u32;
        let rip = gregs.rip;

        // Segment registers: CS, SS, DS, ES, FS, GS
        let sregs = vcpu.get_sregs().map_err(Error::ReadRegs)?;
        let segments = X86SegmentRegs {
            cs: sregs.cs.selector as u32,
            ss: sregs.ss.selector as u32,
            ds: sregs.ds.selector as u32,
            es: sregs.es.selector as u32,
            fs: sregs.fs.selector as u32,
            gs: sregs.gs.selector as u32,
        };

        // x87 FPU internal state
        // TODO(dverkamp): floating point tag word, instruction pointer, and data pointer
        let fpu = vcpu.get_fpu().map_err(Error::ReadRegs)?;
        let fpu_internal = X87FpuInternalRegs {
            fctrl: u32::from(fpu.fcw),
            fstat: u32::from(fpu.fsw),
            fop: u32::from(fpu.last_opcode),
            ..Default::default()
        };

        let mut regs = X86_64CoreRegs {
            regs,
            eflags,
            rip,
            segments,
            st: Default::default(),
            fpu: fpu_internal,
            xmm: Default::default(),
            mxcsr: fpu.mxcsr,
        };

        // x87 FPU registers: ST0-ST7
        for (dst, src) in regs.st.iter_mut().zip(fpu.fpr.iter()) {
            // `fpr` contains the x87 floating point registers in FXSAVE format.
            // Each element contains an 80-bit floating point value.
            *dst = (*src).into();
        }

        // SSE registers: XMM0-XMM15
        for (dst, src) in regs.xmm.iter_mut().zip(fpu.xmm.iter()) {
            *dst = u128::from_le_bytes(*src);
        }

        Ok(regs)
    }

    fn write_registers(vcpu: &T, regs: &X86_64CoreRegs) -> Result<()> {
        // General purpose registers (RAX, RBX, RCX, RDX, RSI, RDI, RBP, RSP, r8-r15) + RIP + rflags
        let orig_gregs = vcpu.get_regs().map_err(Error::ReadRegs)?;
        let gregs = Regs {
            rax: regs.regs[0],
            rbx: regs.regs[1],
            rcx: regs.regs[2],
            rdx: regs.regs[3],
            rsi: regs.regs[4],
            rdi: regs.regs[5],
            rbp: regs.regs[6],
            rsp: regs.regs[7],
            r8: regs.regs[8],
            r9: regs.regs[9],
            r10: regs.regs[10],
            r11: regs.regs[11],
            r12: regs.regs[12],
            r13: regs.regs[13],
            r14: regs.regs[14],
            r15: regs.regs[15],
            rip: regs.rip,
            // Update the lower 32 bits of rflags.
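            // For example (hypothetical values): if the guest's rflags were
            // 0x0000_0001_0000_0202 and GDB sent eflags 0x0000_0246, the
            // expression below yields 0x0000_0001_0000_0246: the upper 32 bits
            // are preserved and the lower 32 bits come from GDB.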
            rflags: (orig_gregs.rflags & !(u32::MAX as u64)) | (regs.eflags as u64),
        };
        vcpu.set_regs(&gregs).map_err(Error::WriteRegs)?;

        // Segment registers: CS, SS, DS, ES, FS, GS
        // Since GDB cares only about the selectors, call get_sregs() first and
        // modify only the selector fields.
        let mut sregs = vcpu.get_sregs().map_err(Error::ReadRegs)?;
        sregs.cs.selector = regs.segments.cs as u16;
        sregs.ss.selector = regs.segments.ss as u16;
        sregs.ds.selector = regs.segments.ds as u16;
        sregs.es.selector = regs.segments.es as u16;
        sregs.fs.selector = regs.segments.fs as u16;
        sregs.gs.selector = regs.segments.gs as u16;

        vcpu.set_sregs(&sregs).map_err(Error::WriteRegs)?;

        // FPU and SSE registers
        let mut fpu = vcpu.get_fpu().map_err(Error::ReadRegs)?;
        fpu.fcw = regs.fpu.fctrl as u16;
        fpu.fsw = regs.fpu.fstat as u16;
        fpu.last_opcode = regs.fpu.fop as u16;
        // TODO(dverkamp): floating point tag word, instruction pointer, and data pointer

        // x87 FPU registers: ST0-ST7
        for (dst, src) in fpu.fpr.iter_mut().zip(regs.st.iter()) {
            *dst = (*src).into();
        }

        // SSE registers: XMM0-XMM15
        for (dst, src) in fpu.xmm.iter_mut().zip(regs.xmm.iter()) {
            dst.copy_from_slice(&src.to_le_bytes());
        }

        vcpu.set_fpu(&fpu).map_err(Error::WriteRegs)?;

        Ok(())
    }

    #[inline]
    fn read_register(_vcpu: &T, _reg: X86_64CoreRegId) -> Result<Vec<u8>> {
        Err(Error::ReadRegIsUnsupported)
    }

    #[inline]
    fn write_register(_vcpu: &T, _reg: X86_64CoreRegId, _buf: &[u8]) -> Result<()> {
        Err(Error::WriteRegIsUnsupported)
    }

    fn read_memory(
        vcpu: &T,
        guest_mem: &GuestMemory,
        vaddr: GuestAddress,
        len: usize,
    ) -> Result<Vec<u8>> {
        let sregs = vcpu.get_sregs().map_err(Error::ReadRegs)?;
        let mut buf = vec![0; len];
        let mut total_read = 0u64;
        // Handle reads across page boundaries.
        while total_read < len as u64 {
            let (paddr, psize) = phys_addr(guest_mem, vaddr.0 + total_read, &sregs)?;
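            // `psize - (paddr & (psize - 1))` is the number of bytes remaining
            // in the page containing `paddr`; clamp each chunk so a single
            // copy never crosses a page boundary.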
            let read_len = std::cmp::min(len as u64 - total_read, psize - (paddr & (psize - 1)));
            guest_mem
                .get_slice_at_addr(GuestAddress(paddr), read_len as usize)
                .map_err(Error::ReadingGuestMemory)?
                .copy_to(&mut buf[total_read as usize..]);
            total_read += read_len;
        }
        Ok(buf)
    }

    fn write_memory(
        vcpu: &T,
        guest_mem: &GuestMemory,
        vaddr: GuestAddress,
        buf: &[u8],
    ) -> Result<()> {
        let sregs = vcpu.get_sregs().map_err(Error::ReadRegs)?;
        let mut total_written = 0u64;
        // Handle writes across page boundaries.
        while total_written < buf.len() as u64 {
            let (paddr, psize) = phys_addr(guest_mem, vaddr.0 + total_written, &sregs)?;
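            // As in read_memory() above, clamp each chunk to the end of the
            // current page.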
            let write_len = std::cmp::min(
                buf.len() as u64 - total_written,
                psize - (paddr & (psize - 1)),
            );

            guest_mem
                .write_all_at_addr(
                    &buf[total_written as usize..(total_written as usize + write_len as usize)],
                    GuestAddress(paddr),
                )
                .map_err(Error::WritingGuestMemory)?;
            total_written += write_len;
        }
        Ok(())
    }

    fn enable_singlestep(vcpu: &T) -> Result<()> {
        vcpu.set_guest_debug(&[], true /* enable_singlestep */)
            .map_err(Error::EnableSinglestep)
    }

    fn get_max_hw_breakpoints(_vcpu: &T) -> Result<usize> {
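        // x86 provides four hardware debug address registers (DR0-DR3), so at
        // most four hardware breakpoints can be active at once.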
        Ok(4usize)
    }

    fn set_hw_breakpoints(vcpu: &T, breakpoints: &[GuestAddress]) -> Result<()> {
        vcpu.set_guest_debug(breakpoints, false /* enable_singlestep */)
            .map_err(Error::SetHwBreakpoint)
    }
}

// Returns the translated physical address and the size of the page it resides in.
fn phys_addr(mem: &GuestMemory, vaddr: u64, sregs: &Sregs) -> Result<(u64, u64)> {
    const CR0_PG_MASK: u64 = 1 << 31;
    const CR4_PAE_MASK: u64 = 1 << 5;
    const CR4_LA57_MASK: u64 = 1 << 12;
    const MSR_EFER_LMA: u64 = 1 << 10;
    // Bits 12 through 51 are the address in a PTE.
    const PTE_ADDR_MASK: u64 = ((1 << 52) - 1) & !0x0fff;
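    // Equivalently, PTE_ADDR_MASK == 0x000f_ffff_ffff_f000.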
    const PAGE_PRESENT: u64 = 0x1;
    const PAGE_PSE_MASK: u64 = 0x1 << 7;

    const PAGE_SIZE_4K: u64 = 4 * 1024;
    const PAGE_SIZE_2M: u64 = 2 * 1024 * 1024;
    const PAGE_SIZE_1G: u64 = 1024 * 1024 * 1024;

    fn next_pte(mem: &GuestMemory, curr_table_addr: u64, vaddr: u64, level: usize) -> Result<u64> {
        let ent: u64 = mem
            .read_obj_from_addr(GuestAddress(
                (curr_table_addr & PTE_ADDR_MASK) + page_table_offset(vaddr, level),
            ))
            .map_err(|_| Error::TranslatingVirtAddr)?;
        /* TODO - convert to a trace
        println!(
            "level {} vaddr {:x} table-addr {:x} mask {:x} ent {:x} offset {:x}",
            level,
            vaddr,
            curr_table_addr,
            PTE_ADDR_MASK,
            ent,
            page_table_offset(vaddr, level)
        );
        */
        if ent & PAGE_PRESENT == 0 {
            return Err(Error::PageNotPresent);
        }
        Ok(ent)
    }

    // Get the offset into the page of `vaddr`.
    fn page_offset(vaddr: u64, page_size: u64) -> u64 {
        vaddr & (page_size - 1)
    }

    // Get the offset into the page table of the given `level` specified by the virtual `address`.
    // `level` is 1 through 5 in x86_64 to handle the five levels of paging.
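    // For example, at level 4 the shift is (4 - 1) * 9 + 12 = 39, so bits
    // 39..=47 of `addr` select the entry, and the index is multiplied by 8
    // (`<< 3`) because each page-table entry is 8 bytes.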
    fn page_table_offset(addr: u64, level: usize) -> u64 {
        let offset = (level - 1) * 9 + 12;
        ((addr >> offset) & 0x1ff) << 3
    }

    if sregs.cr0 & CR0_PG_MASK == 0 {
        return Ok((vaddr, PAGE_SIZE_4K));
    }

    if sregs.cr4 & CR4_PAE_MASK == 0 {
        return Err(Error::TranslatingVirtAddr);
    }

    if sregs.efer & MSR_EFER_LMA != 0 {
        // TODO - handle LA57 (5-level paging).
        if sregs.cr4 & CR4_LA57_MASK != 0 {
            todo!("handle LA57");
        }
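        // Four-level walk: PML4 (level 4) -> PDPT (level 3) -> PD (level 2)
        // -> PT (level 1), stopping early when the PSE bit marks a 1 GiB or
        // 2 MiB page.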
        let p4_ent = next_pte(mem, sregs.cr3, vaddr, 4)?;
        let p3_ent = next_pte(mem, p4_ent, vaddr, 3)?;
        if p3_ent & PAGE_PSE_MASK != 0 {
            // It's a 1G page with the PSE bit in p3_ent
            let paddr = p3_ent & PTE_ADDR_MASK | page_offset(vaddr, PAGE_SIZE_1G);
            return Ok((paddr, PAGE_SIZE_1G));
        }
        let p2_ent = next_pte(mem, p3_ent, vaddr, 2)?;
        if p2_ent & PAGE_PSE_MASK != 0 {
            // It's a 2M page with the PSE bit in p2_ent
            let paddr = p2_ent & PTE_ADDR_MASK | page_offset(vaddr, PAGE_SIZE_2M);
            return Ok((paddr, PAGE_SIZE_2M));
        }
        let p1_ent = next_pte(mem, p2_ent, vaddr, 1)?;
        let paddr = p1_ent & PTE_ADDR_MASK | page_offset(vaddr, PAGE_SIZE_4K);
        return Ok((paddr, PAGE_SIZE_4K));
    }
    Err(Error::TranslatingVirtAddr)
}
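
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sketch of a unit test for phys_addr(), assuming that
    // `Sregs::default()` models the reset state with CR0.PG clear (paging
    // disabled). In that case translation is the identity and the reported
    // page size is 4 KiB.
    #[test]
    fn phys_addr_is_identity_when_paging_disabled() {
        let mem = GuestMemory::new(&[(GuestAddress(0), 0x1_0000)]).unwrap();
        let sregs = Sregs::default();
        let (paddr, psize) = phys_addr(&mem, 0x1234, &sregs).expect("translation failed");
        assert_eq!(paddr, 0x1234);
        assert_eq!(psize, 4 * 1024);
    }
}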