xref: /aosp_15_r20/external/crosvm/devices/src/vmwdt.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! vmwdt is a virtual watchdog memory mapped device which detects stalls
//! on the vCPUs and resets the guest when no 'pet' events are received.
//! <https://docs.google.com/document/d/1DYmk2roxlwHZsOfcJi8xDMdWOHAmomvs2SDh7KPud3Y/edit?usp=sharing&resourcekey=0-oSNabc-t040a1q0K4cyI8Q>
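//!
//! Each vCPU owns a 16-byte register frame (see `VMWDT_REG_LEN`) containing the
//! STATUS, LOAD_CNT, CURRENT_CNT and CLOCK_FREQ_HZ registers defined below. Stall
//! detection is based on the per-vCPU guest time read from procfs: the first missed
//! 'pet' raises the stall PPI towards the guest, and a further miss requests a
//! watchdog reset of the VM.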

use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fs;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Context;
use base::custom_serde::serialize_arc_mutex;
use base::debug;
use base::error;
use base::warn;
use base::AsRawDescriptor;
use base::Descriptor;
use base::Error as SysError;
use base::Event;
use base::EventToken;
use base::SendTube;
use base::Timer;
use base::TimerTrait;
use base::Tube;
use base::VmEventType;
use base::WaitContext;
use base::WorkerThread;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
use vm_control::VmResponse;

use crate::pci::CrosvmDeviceId;
use crate::BusAccessInfo;
use crate::BusDevice;
use crate::DeviceId;
use crate::IrqEdgeEvent;
use crate::Suspendable;

// Register offsets
const VMWDT_REG_STATUS: u32 = 0x00;
const VMWDT_REG_LOAD_CNT: u32 = 0x04;
const VMWDT_REG_CURRENT_CNT: u32 = 0x08;
const VMWDT_REG_CLOCK_FREQ_HZ: u32 = 0x0C;

// Length of the register window for each vCPU
const VMWDT_REG_LEN: u64 = 0x10;

pub const VMWDT_DEFAULT_TIMEOUT_SEC: u32 = 10;
pub const VMWDT_DEFAULT_CLOCK_HZ: u32 = 2;
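
// Expected programming sequence per vCPU (as exercised by the tests at the bottom of
// this file); offsets are relative to that vCPU's register frame:
//   1. write the pet clock frequency to VMWDT_REG_CLOCK_FREQ_HZ,
//   2. write the expiration count to VMWDT_REG_LOAD_CNT (this also rearms the
//      one-shot expiration timer when the watchdog is already enabled),
//   3. write a non-zero value to VMWDT_REG_STATUS to enable the periodic pet timer.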

// Proc stat indexes
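// The guest time of a task is expected to be the 43rd field of
// /proc/<pid>/task/<tid>/stat (see proc(5)), i.e. index 42 once the line is split on
// whitespace, assuming the task name itself contains no spaces.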
const PROCSTAT_GUEST_TIME_INDX: usize = 42;

#[derive(Serialize)]
pub struct VmwdtPerCpu {
    // Flag which indicates whether the watchdog is started
    is_enabled: bool,
    // Timer used to generate periodic events at `timer_freq_hz` frequency
    #[serde(skip_serializing)]
    timer: Timer,
    // The frequency of the `timer`
    timer_freq_hz: u64,
    // Timestamp, measured in milliseconds, of the last guest activity
    last_guest_time_ms: i64,
    // The thread_id of the thread this vcpu belongs to
    thread_id: u32,
    // The process id of the task this vcpu belongs to
    process_id: u32,
    // The pre-programmed one-shot expiration interval. If the guest runs for this
    // interval but we don't receive a periodic pet event, the guest is stalled.
    next_expiration_interval_ms: i64,
    // Keep track of whether the watchdog PPI was raised.
    stall_evt_ppi_triggered: bool,
    // Keep track of whether the timer was armed in one-shot mode or with a repeating interval
    repeating_interval: Option<Duration>,
}

#[derive(Deserialize)]
struct VmwdtPerCpuRestore {
    is_enabled: bool,
    timer_freq_hz: u64,
    last_guest_time_ms: i64,
    next_expiration_interval_ms: i64,
    repeating_interval: Option<Duration>,
}

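/// Memory-mapped virtual watchdog device holding one `VmwdtPerCpu` state per vCPU;
/// see the module documentation above for the register layout and stall handling.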
pub struct Vmwdt {
    vm_wdts: Arc<Mutex<Vec<VmwdtPerCpu>>>,
    // The worker thread that waits on the timer fd
    worker_thread: Option<WorkerThread<Tube>>,
    // TODO: @sebastianene add separate reset event for the watchdog
    // Reset source used when the guest is not responding
    reset_evt_wrtube: SendTube,
    activated: bool,
    // Event used to interrupt the guest on detected stalls
    stall_evt: IrqEdgeEvent,
    vm_ctrl_tube: Option<Tube>,
}

#[derive(Serialize)]
struct VmwdtSnapshot {
    #[serde(serialize_with = "serialize_arc_mutex")]
    vm_wdts: Arc<Mutex<Vec<VmwdtPerCpu>>>,
    activated: bool,
}

#[derive(Deserialize)]
struct VmwdtRestore {
    vm_wdts: Vec<VmwdtPerCpuRestore>,
    activated: bool,
}

impl Vmwdt {
    pub fn new(
        cpu_count: usize,
        reset_evt_wrtube: SendTube,
        evt: IrqEdgeEvent,
        vm_ctrl_tube: Tube,
    ) -> anyhow::Result<Vmwdt> {
        let mut vec = Vec::new();
        for _ in 0..cpu_count {
            vec.push(VmwdtPerCpu {
                last_guest_time_ms: 0,
                thread_id: 0,
                process_id: 0,
                is_enabled: false,
                stall_evt_ppi_triggered: false,
                timer: Timer::new().context("failed to create Timer")?,
                timer_freq_hz: 0,
                next_expiration_interval_ms: 0,
                repeating_interval: None,
            });
        }
        let vm_wdts = Arc::new(Mutex::new(vec));

        Ok(Vmwdt {
            vm_wdts,
            worker_thread: None,
            reset_evt_wrtube,
            activated: false,
            stall_evt: evt,
            vm_ctrl_tube: Some(vm_ctrl_tube),
        })
    }

    pub fn vmwdt_worker_thread(
        vm_wdts: Arc<Mutex<Vec<VmwdtPerCpu>>>,
        kill_evt: Event,
        reset_evt_wrtube: SendTube,
        stall_evt: IrqEdgeEvent,
        vm_ctrl_tube: Tube,
        worker_started_send: Option<SendTube>,
    ) -> anyhow::Result<Tube> {
        let msg = vm_control::VmRequest::VcpuPidTid;
        vm_ctrl_tube
            .send(&msg)
            .context("failed to send request to fetch Vcpus PID and TID")?;
        let vcpus_pid_tid: BTreeMap<usize, (u32, u32)> = match vm_ctrl_tube
            .recv()
            .context("failed to receive vmwdt pids and tids")?
        {
            VmResponse::VcpuPidTidResponse { pid_tid_map } => pid_tid_map,
            _ => {
                return Err(anyhow::anyhow!(
                    "Received incorrect message type when trying to get vcpu pid tid map"
                ));
            }
        };
        {
            let mut vm_wdts = vm_wdts.lock();
            for (i, vmwdt) in (*vm_wdts).iter_mut().enumerate() {
                let pid_tid = vcpus_pid_tid
                    .get(&i)
                    .context("vmwdts empty, which could indicate no vcpus are initialized")?;
                vmwdt.process_id = pid_tid.0;
                vmwdt.thread_id = pid_tid.1;
            }
        }
        if let Some(worker_started_send) = worker_started_send {
            worker_started_send
                .send(&())
                .context("failed to send vmwdt worker started")?;
        }
        #[derive(EventToken)]
        enum Token {
            Kill,
            Timer(usize),
        }

        let wait_ctx: WaitContext<Token> =
            WaitContext::new().context("Failed to create wait_ctx")?;
        wait_ctx
            .add(&kill_evt, Token::Kill)
            .context("Failed to add Tokens to wait_ctx")?;

        let len = vm_wdts.lock().len();
        for clock_id in 0..len {
            let timer_fd = vm_wdts.lock()[clock_id].timer.as_raw_descriptor();
            wait_ctx
                .add(&Descriptor(timer_fd), Token::Timer(clock_id))
                .context("Failed to link FDs to Tokens")?;
        }

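        // Main loop: every time a per-vCPU timer fires, compare the guest time the vCPU
        // accumulated since the last pet against its remaining expiration budget. If the
        // budget is not yet exhausted, rearm a one-shot timer for the remainder;
        // otherwise raise the stall PPI first and, if it was already raised, ask the VM
        // to perform a watchdog reset.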
        loop {
            let events = wait_ctx.wait().context("Failed to wait for events")?;
            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    Token::Kill => {
                        return Ok(vm_ctrl_tube);
                    }
                    Token::Timer(cpu_id) => {
                        let mut wdts_locked = vm_wdts.lock();
                        let watchdog = &mut wdts_locked[cpu_id];
                        match watchdog.timer.mark_waited() {
                            Ok(true) => continue, // timer not actually ready
                            Ok(false) => {}
                            Err(e) => {
                                error!("error waiting for timer event on vcpu {cpu_id}: {e:#}");
                                continue;
                            }
                        }

                        let current_guest_time_ms =
                            Vmwdt::get_guest_time_ms(watchdog.process_id, watchdog.thread_id)
                                .context("get_guest_time_ms failed")?;
                        let remaining_time_ms = watchdog.next_expiration_interval_ms
                            - (current_guest_time_ms - watchdog.last_guest_time_ms);

                        if remaining_time_ms > 0 {
                            watchdog.next_expiration_interval_ms = remaining_time_ms;
                            if let Err(e) = watchdog
                                .timer
                                .reset_oneshot(Duration::from_millis(remaining_time_ms as u64))
                            {
                                error!(
                                    "failed to reset internal timer on vcpu {}: {:#}",
                                    cpu_id, e
                                );
                            }
                            watchdog.repeating_interval = None;
                        } else {
                            if watchdog.stall_evt_ppi_triggered {
                                if let Err(e) = reset_evt_wrtube
                                    .send::<VmEventType>(&VmEventType::WatchdogReset)
                                {
                                    error!("{} failed to send reset event from vcpu {}", e, cpu_id)
                                }
                            }

                            stall_evt
                                .trigger()
                                .context("Failed to trigger stall event")?;
                            watchdog.stall_evt_ppi_triggered = true;
                            watchdog.last_guest_time_ms = current_guest_time_ms;
                        }
                    }
                }
            }
        }
    }


    fn start(&mut self, worker_started_send: Option<SendTube>) -> anyhow::Result<()> {
        let vm_wdts = self.vm_wdts.clone();
        let reset_evt_wrtube = self.reset_evt_wrtube.try_clone().unwrap();
        let stall_event = self.stall_evt.try_clone().unwrap();
        let vm_ctrl_tube = self
            .vm_ctrl_tube
            .take()
            .context("missing vm control tube")?;

        self.activated = true;
        self.worker_thread = Some(WorkerThread::start("vmwdt worker", |kill_evt| {
            Vmwdt::vmwdt_worker_thread(
                vm_wdts,
                kill_evt,
                reset_evt_wrtube,
                stall_event,
                vm_ctrl_tube,
                worker_started_send,
            )
            .expect("failed to start vmwdt worker thread")
        }));
        Ok(())
    }

    fn ensure_started(&mut self) {
        if self.worker_thread.is_some() {
            return;
        }

        let (worker_started_send, worker_started_recv) =
            Tube::directional_pair().expect("failed to create vmwdt worker started tubes");
        self.start(Some(worker_started_send))
            .expect("failed to start Vmwdt");
        worker_started_recv
            .recv::<()>()
            .expect("failed to receive vmwdt worker started");
    }

    #[cfg(any(target_os = "linux", target_os = "android"))]
    pub fn get_guest_time_ms(process_id: u32, thread_id: u32) -> Result<i64, SysError> {
        // TODO: @sebastianene check if we can avoid open-read-close on each call
        let stat_path = format!("/proc/{}/task/{}/stat", process_id, thread_id);
        let contents = fs::read_to_string(stat_path)?;

        let gtime_ticks = contents
            .split_whitespace()
            .nth(PROCSTAT_GUEST_TIME_INDX)
            .and_then(|guest_time| guest_time.parse::<u64>().ok())
            .unwrap_or(0);

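        // Convert clock ticks to milliseconds. As an illustration, with the common
        // _SC_CLK_TCK value of 100, 250 guest ticks map to 2500 ms.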
        // SAFETY:
        // Safe because this just returns an integer
        let ticks_per_sec = unsafe { libc::sysconf(libc::_SC_CLK_TCK) } as u64;
        Ok((gtime_ticks * 1000 / ticks_per_sec) as i64)
    }

    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    pub fn get_guest_time_ms(process_id: u32, thread_id: u32) -> Result<i64, SysError> {
        Ok(0)
    }
}

impl BusDevice for Vmwdt {
    fn debug_label(&self) -> String {
        "Vmwdt".to_owned()
    }

    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::VmWatchdog.into()
    }

    fn read(&mut self, _offset: BusAccessInfo, _data: &mut [u8]) {}

    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        let data_array = match <&[u8; 4]>::try_from(data) {
            Ok(array) => array,
            _ => {
                error!("Bad write size: {} for vmwdt", data.len());
                return;
            }
        };

        let reg_val = u32::from_ne_bytes(*data_array);
        let cpu_index: usize = (info.offset / VMWDT_REG_LEN) as usize;
        let reg_offset = (info.offset % VMWDT_REG_LEN) as u32;

        if cpu_index >= self.vm_wdts.lock().len() {
            error!("Bad write cpu_index {}", cpu_index);
            return;
        }

        match reg_offset {
            VMWDT_REG_STATUS => {
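                // A non-zero write enables the per-vCPU pet timer, which fires at
                // `timer_freq_hz`; a zero write disables it. This assumes the guest has
                // already programmed a non-zero frequency via VMWDT_REG_CLOCK_FREQ_HZ.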
                self.ensure_started();
                let mut wdts_locked = self.vm_wdts.lock();
                let cpu_watchdog = &mut wdts_locked[cpu_index];

                cpu_watchdog.is_enabled = reg_val != 0;

                if reg_val != 0 {
                    let interval = Duration::from_millis(1000 / cpu_watchdog.timer_freq_hz);
                    cpu_watchdog.repeating_interval = Some(interval);
                    cpu_watchdog
                        .timer
                        .reset_repeating(interval)
                        .expect("Failed to reset timer repeating interval");
                } else {
                    cpu_watchdog.repeating_interval = None;
                    cpu_watchdog
                        .timer
                        .clear()
                        .expect("Failed to clear cpu watchdog timer");
                }
            }
            VMWDT_REG_LOAD_CNT => {
                self.ensure_started();
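                // Read the vCPU's pid/tid under the lock, then query the current guest
                // time from procfs without holding the lock across the read.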
                let (process_id, thread_id) = {
                    let mut wdts_locked = self.vm_wdts.lock();
                    let cpu_watchdog = &mut wdts_locked[cpu_index];
                    (cpu_watchdog.process_id, cpu_watchdog.thread_id)
                };
                let guest_time_ms = Vmwdt::get_guest_time_ms(process_id, thread_id)
                    .expect("get_guest_time_ms failed");

                let mut wdts_locked = self.vm_wdts.lock();
                let cpu_watchdog = &mut wdts_locked[cpu_index];
                let next_expiration_interval_ms =
                    reg_val as u64 * 1000 / cpu_watchdog.timer_freq_hz;

                cpu_watchdog.last_guest_time_ms = guest_time_ms;
                cpu_watchdog.stall_evt_ppi_triggered = false;
                cpu_watchdog.next_expiration_interval_ms = next_expiration_interval_ms as i64;

                if cpu_watchdog.is_enabled {
                    if let Err(e) = cpu_watchdog
                        .timer
                        .reset_oneshot(Duration::from_millis(next_expiration_interval_ms))
                    {
                        error!("failed to reset one-shot timer for vcpu {}: {:#}", cpu_index, e);
                    }
                    cpu_watchdog.repeating_interval = None;
                }
            }
            VMWDT_REG_CURRENT_CNT => {
                warn!("invalid write to read-only VMWDT_REG_CURRENT_CNT register");
            }
            VMWDT_REG_CLOCK_FREQ_HZ => {
                let mut wdts_locked = self.vm_wdts.lock();
                let cpu_watchdog = &mut wdts_locked[cpu_index];

                debug!(
                    "CPU:{:x} wrote VMWDT_REG_CLOCK_FREQ_HZ {:x}",
                    cpu_index, reg_val
                );
                cpu_watchdog.timer_freq_hz = reg_val as u64;
            }
            _ => unreachable!(),
        }
    }
}

impl Suspendable for Vmwdt {
    fn sleep(&mut self) -> anyhow::Result<()> {
        if let Some(worker) = self.worker_thread.take() {
            self.vm_ctrl_tube = Some(worker.stop());
        }
        Ok(())
    }

    fn wake(&mut self) -> anyhow::Result<()> {
        if self.activated {
            // We do not pass a tube to notify that the worker thread has started on wake.
            // At this stage, vm_control is blocked on resuming devices and cannot provide the vcpu
            // PIDs/TIDs yet.
            // At the same time, the Vcpus are still frozen, which means no MMIO will get
            // processed, and write will not get triggered.
            // The request to get PIDs/TIDs should get processed before any MMIO request occurs.
            self.start(None)?;
            let mut vm_wdts = self.vm_wdts.lock();
            for vmwdt in vm_wdts.iter_mut() {
                if let Some(interval) = &vmwdt.repeating_interval {
                    vmwdt
                        .timer
                        .reset_repeating(*interval)
                        .context("failed to write repeating interval")?;
                } else if vmwdt.is_enabled {
                    vmwdt
                        .timer
                        .reset_oneshot(Duration::from_millis(
                            vmwdt.next_expiration_interval_ms as u64,
                        ))
                        .context("failed to write oneshot interval")?;
                }
            }
        }
        Ok(())
    }

    fn snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
        serde_json::to_value(&VmwdtSnapshot {
            vm_wdts: self.vm_wdts.clone(),
            activated: self.activated,
        })
        .context("failed to snapshot Vmwdt")
    }

    fn restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
        let deser: VmwdtRestore =
            serde_json::from_value(data).context("failed to deserialize Vmwdt")?;
        let mut vm_wdts = self.vm_wdts.lock();
        for (vmwdt_restore, vmwdt) in deser.vm_wdts.iter().zip(vm_wdts.iter_mut()) {
            vmwdt.is_enabled = vmwdt_restore.is_enabled;
            vmwdt.timer_freq_hz = vmwdt_restore.timer_freq_hz;
            vmwdt.last_guest_time_ms = vmwdt_restore.last_guest_time_ms;
            vmwdt.next_expiration_interval_ms = vmwdt_restore.next_expiration_interval_ms;
            vmwdt.repeating_interval = vmwdt_restore.repeating_interval;
        }
        self.activated = deser.activated;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::process;
    use std::thread::sleep;

    #[cfg(any(target_os = "linux", target_os = "android"))]
    use base::gettid;
    use base::poll_assert;
    use base::Tube;

    use super::*;

    const AARCH64_VMWDT_ADDR: u64 = 0x3000;
    const TEST_VMWDT_CPU_NO: usize = 0x1;

    fn vmwdt_bus_address(offset: u64) -> BusAccessInfo {
        BusAccessInfo {
            offset,
            address: AARCH64_VMWDT_ADDR,
            id: 0,
        }
    }

    #[test]
    fn test_watchdog_internal_timer() {
        let (vm_evt_wrtube, _vm_evt_rdtube) = Tube::directional_pair().unwrap();
        let (vm_ctrl_wrtube, vm_ctrl_rdtube) = Tube::pair().unwrap();
        let irq = IrqEdgeEvent::new().unwrap();
        #[cfg(any(target_os = "linux", target_os = "android"))]
        {
            vm_ctrl_wrtube
                .send(&VmResponse::VcpuPidTidResponse {
                    pid_tid_map: BTreeMap::from([(0, (process::id(), gettid() as u32))]),
                })
                .unwrap();
        }
        let mut device = Vmwdt::new(TEST_VMWDT_CPU_NO, vm_evt_wrtube, irq, vm_ctrl_rdtube).unwrap();

        // Configure the watchdog device with a 10Hz internal clock
        device.write(
            vmwdt_bus_address(VMWDT_REG_CLOCK_FREQ_HZ as u64),
            &[10, 0, 0, 0],
        );
        device.write(vmwdt_bus_address(VMWDT_REG_LOAD_CNT as u64), &[1, 0, 0, 0]);
        device.write(vmwdt_bus_address(VMWDT_REG_STATUS as u64), &[1, 0, 0, 0]);
        let next_expiration_ms = {
            let mut vmwdt_locked = device.vm_wdts.lock();
            // In the test scenario there is no guest accumulating guest_time in
            // /proc/stat, so get_guest_time_ms() returns 0
            vmwdt_locked[0].last_guest_time_ms = 10;
            vmwdt_locked[0].next_expiration_interval_ms
        };

        // Poll multiple times as we don't get a signal when the watchdog thread has run.
        poll_assert!(10, || {
            sleep(Duration::from_millis(50));
            let vmwdt_locked = device.vm_wdts.lock();
            // Verify that our timer expired and the next_expiration_interval_ms changed
            vmwdt_locked[0].next_expiration_interval_ms != next_expiration_ms
        });
    }

    #[test]
    fn test_watchdog_expiration() {
        let (vm_evt_wrtube, vm_evt_rdtube) = Tube::directional_pair().unwrap();
        let (vm_ctrl_wrtube, vm_ctrl_rdtube) = Tube::pair().unwrap();
        let irq = IrqEdgeEvent::new().unwrap();
        #[cfg(any(target_os = "linux", target_os = "android"))]
        {
            vm_ctrl_wrtube
                .send(&VmResponse::VcpuPidTidResponse {
                    pid_tid_map: BTreeMap::from([(0, (process::id(), gettid() as u32))]),
                })
                .unwrap();
        }
        let mut device = Vmwdt::new(TEST_VMWDT_CPU_NO, vm_evt_wrtube, irq, vm_ctrl_rdtube).unwrap();

        // Configure the watchdog device with a 10Hz internal clock
        device.write(
            vmwdt_bus_address(VMWDT_REG_CLOCK_FREQ_HZ as u64),
            &[10, 0, 0, 0],
        );
        device.write(vmwdt_bus_address(VMWDT_REG_LOAD_CNT as u64), &[1, 0, 0, 0]);
        device.write(vmwdt_bus_address(VMWDT_REG_STATUS as u64), &[1, 0, 0, 0]);
        // In the test scenario there is no guest accumulating guest_time in /proc/stat,
        // so get_guest_time_ms() returns 0
        device.vm_wdts.lock()[0].last_guest_time_ms = -100;

        // Check that the interrupt has been raised
        poll_assert!(10, || {
            sleep(Duration::from_millis(50));
            let vmwdt_locked = device.vm_wdts.lock();
            vmwdt_locked[0].stall_evt_ppi_triggered
        });

        // Simulate that time has passed since the last expiration
        device.vm_wdts.lock()[0].last_guest_time_ms = -100;

        // Poll multiple times as we don't get a signal when the watchdog thread has run.
        poll_assert!(10, || {
            sleep(Duration::from_millis(50));
            match vm_evt_rdtube.recv::<VmEventType>() {
                Ok(vm_event) => vm_event == VmEventType::WatchdogReset,
                Err(_e) => false,
            }
        });
    }
}