xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueueUncache.scala (revision 94aa21c6009c2f39c5c5dae9c87260c78887efcc)
/***************************************************************************************
 * Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.{RobPtr, RobLsqIO}
import xiangshan.ExceptionNO._
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.fu.FuConfig.LduCfg
import xiangshan.backend.HasMemBlockParameters

class UncacheEntry(entryIndex: Int)(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // redirect flush
    val flush = Output(Bool())
    // mmio commit
    val rob = Flipped(new RobLsqIO)
    // mmio select
    val mmioSelect = Output(Bool())

    /* transaction */
    // from ldu
    val req = Flipped(Valid(new LqWriteBundle))
    // to ldu: mmio, data
    val mmioOut = DecoupledIO(new MemExuOutput)
    val mmioRawData = Output(new LoadDataFromLQBundle)
    // to ldu: nc with data
    val ncOut = DecoupledIO(new LsPipelineBundle)
    // <=> uncache
    val uncache = new UncacheWordIO
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  val req_valid = RegInit(false.B)
  val isNC = RegInit(false.B)
  val req = Reg(new LqWriteBundle)

  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  val uncacheData = Reg(io.uncache.resp.bits.data.cloneType)
  val nderr = RegInit(false.B)

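  // An entry counts as written back once the load unit accepts its result:
  // through ncOut for NC loads, through mmioOut for MMIO loads.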
  val writeback = Mux(req.nc, io.ncOut.fire, io.mmioOut.fire)

  /**
    * Flush
    *
    * 1. direct flush during idle
    * 2. otherwise delayed flush until receiving uncache resp
    */
  val needFlushReg = RegInit(false.B)
  val needFlush = req_valid && req.uop.robIdx.needFlush(io.redirect)
  val flush = (needFlush && uncacheState===s_idle) || (io.uncache.resp.fire && needFlushReg)
  when(flush){
    needFlushReg := false.B
  }.elsewhen(needFlush){
    needFlushReg := true.B
  }

  /* enter req */
  when (flush) {
    req_valid := false.B
  } .elsewhen (io.req.valid) {
    req_valid := true.B
    req := io.req.bits
    nderr := false.B
  } .elsewhen (writeback) {
    req_valid := false.B
  }
  XSError(!flush && io.req.valid && req_valid, p"LoadQueueUncache: You can not write a valid entry: $entryIndex")

  /**
    * Memory mapped IO / NC operations
    *
    * States:
    * (1) s_idle: wait for the MMIO load to reach the ROB head / for a valid NC req from the load unit
    * (2) s_req: wait until the req is selected and the uncache channel is ready, then send it
    * (3) s_resp: wait for the response from the uncache channel
    * (4) s_wait: wait for the load unit to accept the writeback
    */
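  // Issue condition: an NC load may be sent as soon as the entry is valid and not being
  // flushed; an MMIO load must additionally wait until the ROB reports it as the pending
  // MMIO load (robIdx matches the pending pointer), i.e. it has reached the ROB head.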
  val pendingld = GatedValidRegNext(io.rob.pendingMMIOld)
  val pendingPtr = GatedRegNext(io.rob.pendingPtr)
  val canSendReq = req_valid && !needFlush && Mux(
    req.nc, true.B,
    pendingld && req.uop.robIdx === pendingPtr
  )
  switch (uncacheState) {
    is (s_idle) {
      when (canSendReq) {
        uncacheState := s_req
      }
    }
    is (s_req) {
      when (io.uncache.req.fire) {
        uncacheState := s_resp
      }
    }
    is (s_resp) {
      when (io.uncache.resp.fire) {
        when (needFlushReg) {
          uncacheState := s_idle
        }.otherwise{
          uncacheState := s_wait
        }
      }
    }
    is (s_wait) {
      when (writeback) {
        uncacheState := s_idle
      }
    }
  }

  /* control */
  io.flush := flush
  io.rob.mmio := DontCare
  io.rob.uop := DontCare
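  // Tell the enclosing LoadQueueUncache that this entry holds an in-flight MMIO load,
  // so its uncache request and writeback are routed onto the MMIO path.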
  io.mmioSelect := (uncacheState =/= s_idle) && req.mmio

  /* uncache req */
  io.uncache.req.valid     := uncacheState === s_req
  io.uncache.req.bits      := DontCare
  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.data := DontCare
  io.uncache.req.bits.addr := req.paddr
  io.uncache.req.bits.vaddr:= req.vaddr
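  // req.mask covers a 16-byte aligned region; pick the 8-byte half selected by paddr(3)
  // to match the 64-bit uncache data path.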
  io.uncache.req.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
  io.uncache.req.bits.id   := entryIndex.U
  io.uncache.req.bits.instrtype := DontCare
  io.uncache.req.bits.replayCarry := DontCare
  io.uncache.req.bits.atomic := req.atomic
  io.uncache.req.bits.nc := req.nc

  io.uncache.resp.ready := true.B

  /* uncache resp */
  when (io.uncache.resp.fire) {
    uncacheData := io.uncache.resp.bits.data
    nderr := io.uncache.resp.bits.nderr
  }

  /* uncache writeback */
  val selUop = req.uop
  val func = selUop.fuOpType
  val raddr = req.paddr
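  // Shift the 64-bit uncache data right by the byte offset in paddr(2, 0), then let
  // rdataHelper sign-/zero-extend the result according to the load's fuOpType.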
  val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> uncacheData(63,  0),
      "b001".U -> uncacheData(63,  8),
      "b010".U -> uncacheData(63, 16),
      "b011".U -> uncacheData(63, 24),
      "b100".U -> uncacheData(63, 32),
      "b101".U -> uncacheData(63, 40),
      "b110".U -> uncacheData(63, 48),
      "b111".U -> uncacheData(63, 56)
    ))
  val rdataPartialLoad = rdataHelper(selUop, rdataSel)

  io.mmioOut.valid := false.B
  io.mmioOut.bits := DontCare
  io.mmioRawData := DontCare
  io.ncOut.valid := false.B
  io.ncOut.bits := DontCare

  when(req.nc){
    io.ncOut.valid := (uncacheState === s_wait)
    io.ncOut.bits := DontCare
    io.ncOut.bits.uop := selUop
    io.ncOut.bits.uop.lqIdx := req.uop.lqIdx
    io.ncOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.ncOut.bits.data := rdataPartialLoad
    io.ncOut.bits.paddr := req.paddr
    io.ncOut.bits.vaddr := req.vaddr
    io.ncOut.bits.nc := true.B
    io.ncOut.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
    io.ncOut.bits.schedIndex := req.schedIndex
    io.ncOut.bits.isvec := req.isvec
    io.ncOut.bits.is128bit := req.is128bit
    io.ncOut.bits.vecActive := req.vecActive
  }.otherwise{
    io.mmioOut.valid := (uncacheState === s_wait)
    io.mmioOut.bits := DontCare
    io.mmioOut.bits.uop := selUop
    io.mmioOut.bits.uop.lqIdx := req.uop.lqIdx
    io.mmioOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.mmioOut.bits.data := rdataPartialLoad
    io.mmioOut.bits.debug.isMMIO := true.B
    io.mmioOut.bits.debug.isNC := false.B
    io.mmioOut.bits.debug.paddr := req.paddr
    io.mmioOut.bits.debug.vaddr := req.vaddr
    io.mmioRawData.lqData := uncacheData
    io.mmioRawData.uop := req.uop
    io.mmioRawData.addrOffset := req.paddr
  }

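  // Report a bus error (nderr) as a load access fault together with the writeback,
  // so the exception path observes it in the same cycle as the data.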
  io.exception.valid := writeback
  io.exception.bits := req
  io.exception.bits.uop.exceptionVec(loadAccessFault) := nderr

  /* debug log */
  XSDebug(io.uncache.req.fire,
    "uncache req: pc %x addr %x data %x op %x mask %x\n",
    req.uop.pc,
    io.uncache.req.bits.addr,
    io.uncache.req.bits.data,
    io.uncache.req.bits.cmd,
    io.uncache.req.bits.mask
  )
  XSInfo(io.ncOut.fire,
    "int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
    io.ncOut.bits.uop.robIdx.asUInt,
    io.ncOut.bits.uop.lqIdx.asUInt,
    io.ncOut.bits.uop.pc,
    true.B
  )
  XSInfo(io.mmioOut.fire,
    "int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
    io.mmioOut.bits.uop.robIdx.asUInt,
    io.mmioOut.bits.uop.lqIdx.asUInt,
    io.mmioOut.bits.uop.pc,
    true.B
  )

}

class LoadQueueUncache(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasMemBlockParameters
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // mmio commit
    val rob = Flipped(new RobLsqIO)

    /* transaction */
    // enqueue: from ldu s3
    val req = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle)))
    // writeback: mmio to ldu s0, s3
    val mmioOut = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val mmioRawData = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    // writeback: nc to ldu s0--s3
    val ncOut = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    // <=> uncache
    val uncache = new UncacheWordIO

    /* except */
    // rollback: redirect to re-execute the load when the buffer is full
    val rollback = Output(Valid(new Redirect))
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  /******************************************************************
   * Structure
   ******************************************************************/
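  // One UncacheEntry per buffer slot; the entry index is also used as the id carried on
  // uncache requests, so responses can be routed back to the matching entry.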
  val entries = Seq.tabulate(LoadUncacheBufferSize)(i => Module(new UncacheEntry(i)))

  val freeList = Module(new FreeList(
    size = LoadUncacheBufferSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueUncache freelist"
  ))
  freeList.io := DontCare

  // set default IO
  entries.foreach {
    case (e) =>
      e.io.req.valid := false.B
      e.io.req.bits := DontCare
      e.io.uncache.req.ready := false.B
      e.io.uncache.resp.valid := false.B
      e.io.uncache.resp.bits := DontCare
      e.io.ncOut.ready := false.B
      e.io.mmioOut.ready := false.B
  }
  io.uncache.req.valid := false.B
  io.uncache.req.bits := DontCare
  io.uncache.resp.ready := false.B
  for (w <- 0 until LoadPipelineWidth) {
    io.mmioOut(w).valid := false.B
    io.mmioOut(w).bits := DontCare
    io.mmioRawData(w) := DontCare
    io.ncOut(w).valid := false.B
    io.ncOut(w).bits := DontCare
  }


  /******************************************************************
   * Enqueue
   *
   * s1: hold
   * s2: confirm enqueue and write entry
   *    valid: no redirect, no exception, no replay, is mmio/nc
   *    ready: freelist can allocate
   ******************************************************************/

  val s1_req = VecInit(io.req.map(_.bits))
  val s1_valid = VecInit(io.req.map(_.valid))
  val s2_enqueue = Wire(Vec(LoadPipelineWidth, Bool()))
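  // Back-pressure a load pipeline only when its in-flight s2 request still needs a slot
  // that the free list cannot provide.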
  io.req.zipWithIndex.foreach{ case (r, i) =>
    r.ready := !s2_enqueue(i) || freeList.io.canAllocate(i)
  }

  // s2: enqueue
  val s2_req = (0 until LoadPipelineWidth).map(i => {RegEnable(s1_req(i), s1_valid(i))})
  val s2_valid = (0 until LoadPipelineWidth).map(i => {
    RegNext(s1_valid(i)) &&
    !s2_req(i).uop.robIdx.needFlush(RegNext(io.redirect)) &&
    !s2_req(i).uop.robIdx.needFlush(io.redirect)
  })
  val s2_has_exception = s2_req.map(x => ExceptionNO.selectByFu(x.uop.exceptionVec, LduCfg).asUInt.orR)
  val s2_need_replay = s2_req.map(_.rep_info.need_rep)

  for (w <- 0 until LoadPipelineWidth) {
    s2_enqueue(w) := s2_valid(w) && !s2_has_exception(w) && !s2_need_replay(w) && (s2_req(w).mmio || s2_req(w).nc)
  }

  val s2_enqValidVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val s2_enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt()))

  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.allocateReq(w) := true.B
  }

  // freeList real-allocate
  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.doAllocate(w) := s2_enqValidVec(w)
  }

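  // Requests that enqueue in the same cycle take consecutive free-list slots:
  // port w uses the slot indexed by the number of enqueuing requests on lower ports.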
  for (w <- 0 until LoadPipelineWidth) {
    s2_enqValidVec(w) := s2_enqueue(w) && freeList.io.canAllocate(w)

    val offset = PopCount(s2_enqueue.take(w))
    s2_enqIndexVec(w) := freeList.io.allocateSlot(offset)
  }


  /******************************************************************
   * Uncache Transaction
   *
   * 1. uncache req
   * 2. uncache resp
   * 3. writeback
   ******************************************************************/
  private val NC_WB_MOD = NCWBPorts.length

  val uncacheReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  val mmioSelect = entries.map(e => e.io.mmioSelect).reduce(_ || _)
  val mmioReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  // TODO lyq: it would be better to select in robIdx order / the order in which requests entered
  val ncReqArb = Module(new RRArbiterInit(io.uncache.req.bits.cloneType, LoadUncacheBufferSize))

  val mmioOut = Wire(DecoupledIO(io.mmioOut(0).bits.cloneType))
  val mmioRawData = Wire(io.mmioRawData(0).cloneType)
  val ncOut = Wire(chiselTypeOf(io.ncOut))
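  // NC writebacks are spread over NCWBPorts: the entry valid vector is split into per-port
  // subsets (by index remainder, per SubVec.getMaskRem), and each port picks the first
  // valid entry of its own subset (see PriorityEncoderWithFlag below).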
  val ncOutValidVec = VecInit(entries.map(e => e.io.ncOut.valid))
  val ncOutValidVecRem = SubVec.getMaskRem(ncOutValidVec, NC_WB_MOD)

  // init
  uncacheReq.valid := false.B
  uncacheReq.bits  := DontCare
  mmioReq.valid := false.B
  mmioReq.bits := DontCare
  mmioOut.valid := false.B
  mmioOut.bits := DontCare
  mmioRawData := DontCare
  for (i <- 0 until LoadUncacheBufferSize) {
    ncReqArb.io.in(i).valid := false.B
    ncReqArb.io.in(i).bits := DontCare
  }
  for (i <- 0 until LoadPipelineWidth) {
    ncOut(i).valid := false.B
    ncOut(i).bits := DontCare
  }

  entries.zipWithIndex.foreach {
    case (e, i) =>
      // enqueue
      for (w <- 0 until LoadPipelineWidth) {
        when (s2_enqValidVec(w) && (i.U === s2_enqIndexVec(w))) {
          e.io.req.valid := true.B
          e.io.req.bits := s2_req(w)
        }
      }

      // control
      e.io.redirect <> io.redirect
      e.io.rob <> io.rob

      // uncache req, writeback
      when (e.io.mmioSelect) {
        mmioReq.valid := e.io.uncache.req.valid
        mmioReq.bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := mmioReq.ready

        e.io.mmioOut.ready := mmioOut.ready
        mmioOut.valid := e.io.mmioOut.valid
        mmioOut.bits := e.io.mmioOut.bits
        mmioRawData := e.io.mmioRawData

      }.otherwise{
        ncReqArb.io.in(i).valid := e.io.uncache.req.valid
        ncReqArb.io.in(i).bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := ncReqArb.io.in(i).ready

        (0 until NC_WB_MOD).map { w =>
          val (idx, ncOutValid) = PriorityEncoderWithFlag(ncOutValidVecRem(w))
          val port = NCWBPorts(w)
          when((i.U === idx) && ncOutValid) {
            ncOut(port).valid := ncOutValid
            ncOut(port).bits := e.io.ncOut.bits
            e.io.ncOut.ready := ncOut(port).ready
          }
        }

      }

      // uncache resp
      when (i.U === io.uncache.resp.bits.id) {
        e.io.uncache.resp <> io.uncache.resp
      }

  }

  mmioReq.ready := false.B
  ncReqArb.io.out.ready := false.B
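  // An in-flight MMIO entry, when selected, takes priority over all NC requests for the
  // uncache channel.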
  when(mmioSelect){
    uncacheReq <> mmioReq
  }.otherwise{
    uncacheReq <> ncReqArb.io.out
  }

  // uncache Request
  AddPipelineReg(uncacheReq, io.uncache.req, false.B)

  // uncache Writeback
  AddPipelineReg(mmioOut, io.mmioOut(UncacheWBPort), false.B)
  io.mmioRawData(UncacheWBPort) := RegEnable(mmioRawData, mmioOut.fire)

  (0 until LoadPipelineWidth).foreach { i => AddPipelineReg(ncOut(i), io.ncOut(i), false.B) }

  // uncache exception
  io.exception.valid := Cat(entries.map(_.io.exception.valid)).orR
  io.exception.bits := ParallelPriorityMux(entries.map(e =>
    (e.io.exception.valid, e.io.exception.bits)
  ))

  // rob
  for (i <- 0 until LoadPipelineWidth) {
    io.rob.mmio(i) := RegNext(s1_valid(i) && s1_req(i).mmio)
    io.rob.uop(i) := RegEnable(s1_req(i).uop, s1_valid(i))
  }


  /******************************************************************
   * Deallocate
   ******************************************************************/
  // UncacheBuffer deallocate
  val freeMaskVec = Wire(Vec(LoadUncacheBufferSize, Bool()))

  // init
  freeMaskVec.map(e => e := false.B)

  // dealloc logic
  entries.zipWithIndex.foreach {
    case (e, i) =>
      when ((e.io.mmioSelect && e.io.mmioOut.fire) || e.io.ncOut.fire || e.io.flush) {
        freeMaskVec(i) := true.B
      }
  }

  freeList.io.free := freeMaskVec.asUInt


  /******************************************************************
   * Uncache rollback detection
   *
   * When uncache loads fail to enqueue because no entry can be allocated,
   * they must be rolled back and re-executed.
   *
   * Cycle 0: uncache enqueue.
   * Cycle 1: Select the oldest of the rejected uncache loads.
   * Cycle 2: Redirect fire.
   *   Choose the oldest load from LoadPipelineWidth oldest loads.
   *   Prepare the redirect request according to the detected rejection.
   *   Fire the redirect request (if valid)
   *
   *               Load_S3  .... Load_S3
   * stage 0:        lq            lq
   *                 |             | (can not enqueue)
   * stage 1:        lq            lq
   *                 |             |
   *                 ---------------
   *                        |
   * stage 2:               lq
   *                        |
   *                     rollback req
   *
   ******************************************************************/
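  // Returns a one-hot vector marking the valid redirect whose robIdx is the oldest.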
  def selectOldestRedirect(xs: Seq[Valid[Redirect]]): Vec[Bool] = {
    val compareVec = (0 until xs.length).map(i => (0 until i).map(j => isAfter(xs(j).bits.robIdx, xs(i).bits.robIdx)))
    val resultOnehot = VecInit((0 until xs.length).map(i => Cat((0 until xs.length).map(j =>
      (if (j < i) !xs(j).valid || compareVec(i)(j)
      else if (j == i) xs(i).valid
      else !xs(j).valid || !compareVec(j)(i))
    )).andR))
    resultOnehot
  }
  val reqNeedCheck = VecInit((0 until LoadPipelineWidth).map(w =>
    s2_enqueue(w) && !s2_enqValidVec(w)
  ))
  val reqSelUops = VecInit(s2_req.map(_.uop))
  val allRedirect = (0 until LoadPipelineWidth).map(i => {
    val redirect = Wire(Valid(new Redirect))
    redirect.valid := reqNeedCheck(i)
    redirect.bits             := DontCare
    redirect.bits.isRVC       := reqSelUops(i).preDecodeInfo.isRVC
    redirect.bits.robIdx      := reqSelUops(i).robIdx
    redirect.bits.ftqIdx      := reqSelUops(i).ftqPtr
    redirect.bits.ftqOffset   := reqSelUops(i).ftqOffset
    redirect.bits.level       := RedirectLevel.flush
    redirect.bits.cfiUpdate.target := reqSelUops(i).pc // TODO: check if need pc
    redirect.bits.debug_runahead_checkpoint_id := reqSelUops(i).debugInfo.runahead_checkpoint_id
    redirect
  })
  val oldestOneHot = selectOldestRedirect(allRedirect)
  val oldestRedirect = Mux1H(oldestOneHot, allRedirect)
  val lastCycleRedirect = Wire(Valid(new Redirect))
  lastCycleRedirect.valid := RegNext(io.redirect.valid)
  lastCycleRedirect.bits := RegEnable(io.redirect.bits, io.redirect.valid)
  val lastLastCycleRedirect = Wire(Valid(new Redirect))
  lastLastCycleRedirect.valid := RegNext(lastCycleRedirect.valid)
  lastLastCycleRedirect.bits := RegEnable(lastCycleRedirect.bits, lastCycleRedirect.valid)
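  // The rollback is registered for one cycle before it fires, so it must also be squashed
  // if the chosen load is flushed by the current redirect or by the redirects of the
  // previous two cycles.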
  io.rollback.valid := GatedValidRegNext(oldestRedirect.valid &&
                      !oldestRedirect.bits.robIdx.needFlush(io.redirect) &&
                      !oldestRedirect.bits.robIdx.needFlush(lastCycleRedirect) &&
                      !oldestRedirect.bits.robIdx.needFlush(lastLastCycleRedirect))
  io.rollback.bits := RegEnable(oldestRedirect.bits, oldestRedirect.valid)


  /******************************************************************
   * Perf Counter
   ******************************************************************/
  val validCount = freeList.io.validCount
  val allowEnqueue = !freeList.io.empty
  QueuePerf(LoadUncacheBufferSize, validCount, !allowEnqueue)

  XSPerfAccumulate("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc)
  XSPerfAccumulate("mmio_writeback_success", io.mmioOut(0).fire)
  XSPerfAccumulate("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready)
  XSPerfAccumulate("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc)
  XSPerfAccumulate("nc_writeback_success", io.ncOut(0).fire)
  XSPerfAccumulate("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready)
  XSPerfAccumulate("uncache_full_rollback", io.rollback.valid)

  val perfEvents: Seq[(String, UInt)] = Seq(
    ("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc),
    ("mmio_writeback_success", io.mmioOut(0).fire),
    ("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready),
    ("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc),
    ("nc_writeback_success", io.ncOut(0).fire),
    ("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready),
    ("uncache_full_rollback", io.rollback.valid)
  )
  // end
}