/***************************************************************************************
 * Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.{RobPtr, RobLsqIO}
import xiangshan.ExceptionNO._
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.fu.FuConfig.LduCfg
import _root_.xiangshan.backend.HasMemBlockParameters

class UncacheEntry(entryIndex: Int)(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // redirect flush
    val flush = Output(Bool())
    // mmio commit
    val rob = Flipped(new RobLsqIO)
    // mmio select
    val mmioSelect = Output(Bool())

    /* transaction */
    // from ldu
    val req = Flipped(Valid(new LqWriteBundle))
    // to ldu: mmio, data
    val mmioOut = DecoupledIO(new MemExuOutput)
    val mmioRawData = Output(new LoadDataFromLQBundle)
    // to ldu: nc with data
    val ncOut = DecoupledIO(new LsPipelineBundle)
    // <=> uncache
    val uncache = new UncacheWordIO
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  val req_valid = RegInit(false.B)
  val isNC = RegInit(false.B)
  val req = Reg(new LqWriteBundle)

  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  val uncacheData = Reg(io.uncache.resp.bits.data.cloneType)
  val nderr = RegInit(false.B)

  val writeback = Mux(req.nc, io.ncOut.fire, io.mmioOut.fire)

  /**
    * Flush
    *
    * 1. if the entry is idle, flush it immediately
    * 2. otherwise, delay the flush until the uncache resp is received
    */
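  // Note: uncache responses are matched to entries by id (see io.uncache.req.bits.id
  // below), so an entry with a request already in flight is not freed right away;
  // needFlushReg holds the pending flush until io.uncache.resp fires, which keeps a
  // late response from being attributed to a reused entry.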
  val needFlushReg = RegInit(false.B)
  val needFlush = req_valid && req.uop.robIdx.needFlush(io.redirect)
  val flush = (needFlush && uncacheState === s_idle) || (io.uncache.resp.fire && needFlushReg)
  when(flush){
    needFlushReg := false.B
  }.elsewhen(needFlush){
    needFlushReg := true.B
  }

  /* enter req */
  when (flush) {
    req_valid := false.B
  } .elsewhen (io.req.valid) {
    XSError(req_valid, p"LoadQueueUncache: You can not write to a valid entry: $entryIndex")
    req_valid := true.B
    req := io.req.bits
    nderr := false.B
  } .elsewhen (writeback) {
    req_valid := false.B
  }

  /**
    * Memory mapped IO / NC operations
    *
    * States:
    * (1) s_idle: wait for the mmio load to reach the ROB head, or for a valid nc req from the load unit
    * (2) s_req: wait until this req is selected and the uncache channel is ready, then send it
    * (3) s_resp: wait for the response from the uncache channel
    * (4) s_wait: wait for the load unit to accept the writeback
    */
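  // Illustrative flow: an mmio load stays in s_idle until the ROB reports it as the
  // pending mmio load at its head (pendingld && robIdx === pendingPtr), then moves to
  // s_req, fires io.uncache.req, waits in s_resp for io.uncache.resp (latching the
  // returned data and nderr), and finally waits in s_wait until the load unit accepts
  // io.mmioOut. An nc load skips the ROB check and enters s_req as soon as the entry
  // holds a valid, un-flushed request.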
  val pendingld = GatedValidRegNext(io.rob.pendingMMIOld)
  val pendingPtr = GatedRegNext(io.rob.pendingPtr)
  val canSendReq = req_valid && !needFlush && Mux(
    req.nc, true.B,
    pendingld && req.uop.robIdx === pendingPtr
  )
  switch (uncacheState) {
    is (s_idle) {
      when (canSendReq) {
        uncacheState := s_req
      }
    }
    is (s_req) {
      when (io.uncache.req.fire) {
        uncacheState := s_resp
      }
    }
    is (s_resp) {
      when (io.uncache.resp.fire) {
        when (needFlushReg) {
          uncacheState := s_idle
        }.otherwise{
          uncacheState := s_wait
        }
      }
    }
    is (s_wait) {
      when (writeback) {
        uncacheState := s_idle
      }
    }
  }

  /* control */
  io.flush := flush
  io.rob.mmio := DontCare
  io.rob.uop := DontCare
  io.mmioSelect := (uncacheState =/= s_idle) && req.mmio

  /* uncache req */
  io.uncache.req.valid     := uncacheState === s_req
  io.uncache.req.bits      := DontCare
  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.data := DontCare
  io.uncache.req.bits.addr := req.paddr
  io.uncache.req.bits.vaddr:= req.vaddr
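  // req.mask covers a 16-byte region while the uncache data path is 8 bytes wide,
  // so paddr(3) selects the half of the mask that the access actually falls in.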
  io.uncache.req.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
  io.uncache.req.bits.id   := entryIndex.U
  io.uncache.req.bits.instrtype := DontCare
  io.uncache.req.bits.replayCarry := DontCare
  io.uncache.req.bits.atomic := req.atomic
  io.uncache.req.bits.nc := req.nc

  io.uncache.resp.ready := true.B

  /* uncache resp */
  when (io.uncache.resp.fire) {
    uncacheData := io.uncache.resp.bits.data
    nderr := io.uncache.resp.bits.nderr
  }

  /* uncache writeback */
  val selUop = req.uop
  val func = selUop.fuOpType
  val raddr = req.paddr
  val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> uncacheData(63,  0),
      "b001".U -> uncacheData(63,  8),
      "b010".U -> uncacheData(63, 16),
      "b011".U -> uncacheData(63, 24),
      "b100".U -> uncacheData(63, 32),
      "b101".U -> uncacheData(63, 40),
      "b110".U -> uncacheData(63, 48),
      "b111".U -> uncacheData(63, 56)
    ))
  val rdataPartialLoad = rdataHelper(selUop, rdataSel)
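  // Worked example: for a load with paddr(2, 0) = 3, rdataSel takes uncacheData(63, 24),
  // dropping the low three bytes so that the addressed byte lands at bit 0; rdataHelper
  // then extends the selected bytes according to the uop's fuOpType (load width and sign).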

  io.mmioOut.valid := false.B
  io.mmioOut.bits := DontCare
  io.mmioRawData := DontCare
  io.ncOut.valid := false.B
  io.ncOut.bits := DontCare

  when(req.nc){
    io.ncOut.valid := (uncacheState === s_wait)
    io.ncOut.bits := DontCare
    io.ncOut.bits.uop := selUop
    io.ncOut.bits.uop.lqIdx := req.uop.lqIdx
    io.ncOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.ncOut.bits.data := rdataPartialLoad
    io.ncOut.bits.paddr := req.paddr
    io.ncOut.bits.vaddr := req.vaddr
    io.ncOut.bits.nc := true.B
    io.ncOut.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
    io.ncOut.bits.schedIndex := req.schedIndex
    io.ncOut.bits.isvec := req.isvec
    io.ncOut.bits.is128bit := req.is128bit
    io.ncOut.bits.vecActive := req.vecActive
  }.otherwise{
    io.mmioOut.valid := (uncacheState === s_wait)
    io.mmioOut.bits := DontCare
    io.mmioOut.bits.uop := selUop
    io.mmioOut.bits.uop.lqIdx := req.uop.lqIdx
    io.mmioOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.mmioOut.bits.data := rdataPartialLoad
    io.mmioOut.bits.debug.isMMIO := true.B
    io.mmioOut.bits.debug.isNC := false.B
    io.mmioOut.bits.debug.paddr := req.paddr
    io.mmioOut.bits.debug.vaddr := req.vaddr
    io.mmioRawData.lqData := uncacheData
    io.mmioRawData.uop := req.uop
    io.mmioRawData.addrOffset := req.paddr
  }

  io.exception.valid := writeback
  io.exception.bits := req
  io.exception.bits.uop.exceptionVec(loadAccessFault) := nderr

  /* debug log */
  when (io.uncache.req.fire) {
    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      req.uop.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }
  when(io.ncOut.fire) {
    XSInfo("int load miss write to cbd robidx %d lqidx %d pc 0x%x mmio %x\n",
      io.ncOut.bits.uop.robIdx.asUInt,
      io.ncOut.bits.uop.lqIdx.asUInt,
      io.ncOut.bits.uop.pc,
      true.B
    )
  }
  when(io.mmioOut.fire) {
    XSInfo("int load miss write to cbd robidx %d lqidx %d pc 0x%x mmio %x\n",
      io.mmioOut.bits.uop.robIdx.asUInt,
      io.mmioOut.bits.uop.lqIdx.asUInt,
      io.mmioOut.bits.uop.pc,
      true.B
    )
  }

}

class LoadQueueUncache(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasMemBlockParameters
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // mmio commit
    val rob = Flipped(new RobLsqIO)

    /* transaction */
    // enqueue: from ldu s3
    val req = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle)))
    // writeback: mmio to ldu s0, s3
    val mmioOut = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val mmioRawData = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    // writeback: nc to ldu s0--s3
    val ncOut = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    // <=> uncache
    val uncache = new UncacheWordIO

    /* except */
    // rollback request to re-execute a load that could not enqueue because the buffer is full
    val rollback = Output(Valid(new Redirect))
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  /******************************************************************
   * Structure
   ******************************************************************/
  val entries = Seq.tabulate(LoadUncacheBufferSize)(i => Module(new UncacheEntry(i)))

  val freeList = Module(new FreeList(
    size = LoadUncacheBufferSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueUncache freelist"
  ))
  freeList.io := DontCare

  // set default IO
  entries.foreach {
    case (e) =>
      e.io.req.valid := false.B
      e.io.req.bits := DontCare
      e.io.uncache.req.ready := false.B
      e.io.uncache.resp.valid := false.B
      e.io.uncache.resp.bits := DontCare
      e.io.ncOut.ready := false.B
      e.io.mmioOut.ready := false.B
  }
  io.uncache.req.valid := false.B
  io.uncache.req.bits := DontCare
  io.uncache.resp.ready := false.B
  for (w <- 0 until LoadPipelineWidth) {
    io.mmioOut(w).valid := false.B
    io.mmioOut(w).bits := DontCare
    io.mmioRawData(w) := DontCare
    io.ncOut(w).valid := false.B
    io.ncOut(w).bits := DontCare
  }


  /******************************************************************
   * Enqueue
   *
   * s1: hold the incoming request
   * s2: confirm enqueue and write the entry
   *    valid: no redirect, no exception, no replay, and the load is mmio/nc
   *    ready: freelist can allocate
   ******************************************************************/

  val s1_req = VecInit(io.req.map(_.bits))
  val s1_valid = VecInit(io.req.map(_.valid))
  val s2_enqueue = Wire(Vec(LoadPipelineWidth, Bool()))
  io.req.zipWithIndex.foreach{ case (r, i) =>
    r.ready := !s2_enqueue(i) || freeList.io.canAllocate(i)
  }
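  // Backpressure: io.req(i) only stalls while this port's s2-stage request still wants
  // to enqueue (s2_enqueue(i)) and the freelist cannot hand it a slot.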

  // s2: enqueue
  val s2_req = (0 until LoadPipelineWidth).map(i => {RegEnable(s1_req(i), s1_valid(i))})
  val s2_valid = (0 until LoadPipelineWidth).map(i => {
    RegNext(s1_valid(i)) &&
    !s2_req(i).uop.robIdx.needFlush(RegNext(io.redirect)) &&
    !s2_req(i).uop.robIdx.needFlush(io.redirect)
  })
  val s2_has_exception = s2_req.map(x => ExceptionNO.selectByFu(x.uop.exceptionVec, LduCfg).asUInt.orR)
  val s2_need_replay = s2_req.map(_.rep_info.need_rep)

  for (w <- 0 until LoadPipelineWidth) {
    s2_enqueue(w) := s2_valid(w) && !s2_has_exception(w) && !s2_need_replay(w) && (s2_req(w).mmio || s2_req(w).nc)
  }

  val s2_enqValidVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val s2_enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt()))

  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.allocateReq(w) := true.B
  }

  // freeList real-allocate
  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.doAllocate(w) := s2_enqValidVec(w)
  }

  for (w <- 0 until LoadPipelineWidth) {
    s2_enqValidVec(w) := s2_enqueue(w) && freeList.io.canAllocate(w)

    val offset = PopCount(s2_enqueue.take(w))
    s2_enqIndexVec(w) := freeList.io.allocateSlot(offset)
  }
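  // Compaction example (hypothetical widths): with three load pipelines and
  // s2_enqueue = (1, 0, 1), port 0 takes allocateSlot(0) and port 2 takes
  // allocateSlot(1), so freelist slots are consumed densely no matter which
  // pipeline ports actually enqueue in a given cycle.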


  /******************************************************************
   * Uncache Transaction
   *
   * 1. uncache req
   * 2. uncache resp
   * 3. writeback
   ******************************************************************/
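  // Request arbitration below: while any entry is in mmioSelect its request is
  // forwarded exclusively (only one mmio load can be outstanding, since it must sit
  // at the ROB head); nc requests from the other entries go through a round-robin
  // arbiter instead.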
  private val NC_WB_MOD = NCWBPorts.length

  val uncacheReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  val mmioSelect = entries.map(e => e.io.mmioSelect).reduce(_ || _)
  val mmioReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  // TODO lyq: it would be best to select in robIdx order, i.e. the order in which the requests entered
  val ncReqArb = Module(new RRArbiterInit(io.uncache.req.bits.cloneType, LoadUncacheBufferSize))

  val mmioOut = Wire(DecoupledIO(io.mmioOut(0).bits.cloneType))
  val mmioRawData = Wire(io.mmioRawData(0).cloneType)
  val ncOut = Wire(chiselTypeOf(io.ncOut))
  val ncOutValidVec = VecInit(entries.map(e => e.io.ncOut.valid))
  val ncOutValidVecRem = SubVec.getMaskRem(ncOutValidVec, NC_WB_MOD)
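  // ncOutValidVecRem splits the per-entry ncOut valids into NC_WB_MOD groups
  // (presumably by entry index modulo NC_WB_MOD, as the name of SubVec.getMaskRem
  // suggests); each group is drained through its own nc writeback port.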

  // init
  uncacheReq.valid := false.B
  uncacheReq.bits  := DontCare
  mmioReq.valid := false.B
  mmioReq.bits := DontCare
  mmioOut.valid := false.B
  mmioOut.bits := DontCare
  mmioRawData := DontCare
  for (i <- 0 until LoadUncacheBufferSize) {
    ncReqArb.io.in(i).valid := false.B
    ncReqArb.io.in(i).bits := DontCare
  }
  for (i <- 0 until LoadPipelineWidth) {
    ncOut(i).valid := false.B
    ncOut(i).bits := DontCare
  }

  entries.zipWithIndex.foreach {
    case (e, i) =>
      // enqueue
      for (w <- 0 until LoadPipelineWidth) {
        when (s2_enqValidVec(w) && (i.U === s2_enqIndexVec(w))) {
          e.io.req.valid := true.B
          e.io.req.bits := s2_req(w)
        }
      }

      // control
      e.io.redirect <> io.redirect
      e.io.rob <> io.rob

      // uncache req, writeback
      when (e.io.mmioSelect) {
        mmioReq.valid := e.io.uncache.req.valid
        mmioReq.bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := mmioReq.ready

        e.io.mmioOut.ready := mmioOut.ready
        mmioOut.valid := e.io.mmioOut.valid
        mmioOut.bits := e.io.mmioOut.bits
        mmioRawData := e.io.mmioRawData

      }.otherwise{
        ncReqArb.io.in(i).valid := e.io.uncache.req.valid
        ncReqArb.io.in(i).bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := ncReqArb.io.in(i).ready

        (0 until NC_WB_MOD).foreach { w =>
          val (idx, ncOutValid) = PriorityEncoderWithFlag(ncOutValidVecRem(w))
          val port = NCWBPorts(w)
          when((i.U === idx) && ncOutValid) {
            ncOut(port).valid := ncOutValid
            ncOut(port).bits := e.io.ncOut.bits
            e.io.ncOut.ready := ncOut(port).ready
          }
        }

      }

      // uncache resp
      when (i.U === io.uncache.resp.bits.id) {
        e.io.uncache.resp <> io.uncache.resp
      }

  }

  mmioReq.ready := false.B
  ncReqArb.io.out.ready := false.B
  when(mmioSelect){
    uncacheReq <> mmioReq
  }.otherwise{
    uncacheReq <> ncReqArb.io.out
  }

  // uncache Request
  AddPipelineReg(uncacheReq, io.uncache.req, false.B)
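  // AddPipelineReg inserts a register slice between the internal request/writeback
  // wires and the external ports, cutting the combinational path at the cost of one
  // cycle of latency; the trailing false.B is presumably the slice's flush input,
  // i.e. these slices are never flushed.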

  // uncache Writeback
  AddPipelineReg(mmioOut, io.mmioOut(UncacheWBPort), false.B)
  io.mmioRawData(UncacheWBPort) := RegEnable(mmioRawData, mmioOut.fire)

  (0 until LoadPipelineWidth).foreach { i => AddPipelineReg(ncOut(i), io.ncOut(i), false.B) }

  // uncache exception
  io.exception.valid := Cat(entries.map(_.io.exception.valid)).orR
  io.exception.bits := ParallelPriorityMux(entries.map(e =>
    (e.io.exception.valid, e.io.exception.bits)
  ))

  // rob
  for (i <- 0 until LoadPipelineWidth) {
    io.rob.mmio(i) := RegNext(s1_valid(i) && s1_req(i).mmio)
    io.rob.uop(i) := RegEnable(s1_req(i).uop, s1_valid(i))
  }


  /******************************************************************
   * Deallocate
   ******************************************************************/
  // UncacheBuffer deallocate
  val freeMaskVec = Wire(Vec(LoadUncacheBufferSize, Bool()))

  // init
  freeMaskVec.foreach(e => e := false.B)

  // dealloc logic
  entries.zipWithIndex.foreach {
    case (e, i) =>
      when ((e.io.mmioSelect && e.io.mmioOut.fire) || e.io.ncOut.fire || e.io.flush) {
        freeMaskVec(i) := true.B
      }
  }

  freeList.io.free := freeMaskVec.asUInt


  /******************************************************************
   * Uncache rollback detection
   *
   * When an uncache load arrives at enqueue but cannot be accepted (no free entry),
   * it must be rolled back and re-executed.
   *
   * Cycle 0: uncache loads try to enqueue.
   * Cycle 1: select the oldest rejected uncache load.
   * Cycle 2: redirect fire.
   *   Choose the oldest load among the LoadPipelineWidth candidates.
   *   Prepare the redirect request according to the detected rejection.
   *   Fire the redirect request (if valid).
   *
   *               Load_S3  .... Load_S3
   * stage 0:        lq            lq
   *                 |             | (can not enqueue)
   * stage 1:        lq            lq
   *                 |             |
   *                 ---------------
   *                        |
   * stage 2:               lq
   *                        |
   *                     rollback req
   *
   ******************************************************************/
  def selectOldestRedirect(xs: Seq[Valid[Redirect]]): Vec[Bool] = {
    val compareVec = (0 until xs.length).map(i => (0 until i).map(j => isAfter(xs(j).bits.robIdx, xs(i).bits.robIdx)))
    val resultOnehot = VecInit((0 until xs.length).map(i => Cat((0 until xs.length).map(j =>
      (if (j < i) !xs(j).valid || compareVec(i)(j)
      else if (j == i) xs(i).valid
      else !xs(j).valid || !compareVec(j)(i))
    )).andR))
    resultOnehot
  }
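  // selectOldestRedirect returns a one-hot vector: bit i is set iff xs(i) is valid and
  // no other valid candidate is older (compareVec(i)(j) records whether xs(j) is
  // younger than xs(i), i.e. isAfter(xs(j).robIdx, xs(i).robIdx)). For example, with
  // valid candidates of robIdx ages {5, 2, 7} (ignoring wrap-around), only the one
  // with age 2 is selected.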
  val reqNeedCheck = VecInit((0 until LoadPipelineWidth).map(w =>
    s2_enqueue(w) && !s2_enqValidVec(w)
  ))
  val reqSelUops = VecInit(s2_req.map(_.uop))
  val allRedirect = (0 until LoadPipelineWidth).map(i => {
    val redirect = Wire(Valid(new Redirect))
    redirect.valid := reqNeedCheck(i)
    redirect.bits             := DontCare
    redirect.bits.isRVC       := reqSelUops(i).preDecodeInfo.isRVC
    redirect.bits.robIdx      := reqSelUops(i).robIdx
    redirect.bits.ftqIdx      := reqSelUops(i).ftqPtr
    redirect.bits.ftqOffset   := reqSelUops(i).ftqOffset
    redirect.bits.level       := RedirectLevel.flush
    redirect.bits.cfiUpdate.target := reqSelUops(i).pc // TODO: check if need pc
    redirect.bits.debug_runahead_checkpoint_id := reqSelUops(i).debugInfo.runahead_checkpoint_id
    redirect
  })
  val oldestOneHot = selectOldestRedirect(allRedirect)
  val oldestRedirect = Mux1H(oldestOneHot, allRedirect)
  val lastCycleRedirect = Wire(Valid(new Redirect))
  lastCycleRedirect.valid := RegNext(io.redirect.valid)
  lastCycleRedirect.bits := RegEnable(io.redirect.bits, io.redirect.valid)
  val lastLastCycleRedirect = Wire(Valid(new Redirect))
  lastLastCycleRedirect.valid := RegNext(lastCycleRedirect.valid)
  lastLastCycleRedirect.bits := RegEnable(lastCycleRedirect.bits, lastCycleRedirect.valid)
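  // The rollback is suppressed if the rejected load is already covered by a redirect
  // from this cycle or either of the two previous cycles; since the rollback itself is
  // registered before it fires, such an in-flight redirect may already have flushed
  // the load, and issuing another rollback for it would be redundant.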
  io.rollback.valid := GatedValidRegNext(oldestRedirect.valid &&
                      !oldestRedirect.bits.robIdx.needFlush(io.redirect) &&
                      !oldestRedirect.bits.robIdx.needFlush(lastCycleRedirect) &&
                      !oldestRedirect.bits.robIdx.needFlush(lastLastCycleRedirect))
  io.rollback.bits := RegEnable(oldestRedirect.bits, oldestRedirect.valid)


  /******************************************************************
   * Perf Counter
   ******************************************************************/
  val validCount = freeList.io.validCount
  val allowEnqueue = !freeList.io.empty
  QueuePerf(LoadUncacheBufferSize, validCount, !allowEnqueue)

  XSPerfAccumulate("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc)
  XSPerfAccumulate("mmio_writeback_success", io.mmioOut(0).fire)
  XSPerfAccumulate("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready)
  XSPerfAccumulate("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc)
  XSPerfAccumulate("nc_writeback_success", io.ncOut(0).fire)
  XSPerfAccumulate("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready)
  XSPerfAccumulate("uncache_full_rollback", io.rollback.valid)

  val perfEvents: Seq[(String, UInt)] = Seq(
    ("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc),
    ("mmio_writeback_success", io.mmioOut(0).fire),
    ("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready),
    ("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc),
    ("nc_writeback_success", io.ncOut(0).fire),
    ("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready),
    ("uncache_full_rollback", io.rollback.valid)
  )
  // end
}
