/***************************************************************************************
 * Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.{RobPtr, RobLsqIO}
import xiangshan.ExceptionNO._
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.backend.fu.FuConfig.LduCfg
import xiangshan.backend.HasMemBlockParameters

class UncacheEntry(entryIndex: Int)(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // redirect flush
    val flush = Output(Bool())
    // mmio commit
    val rob = Flipped(new RobLsqIO)
    // mmio select
    val mmioSelect = Output(Bool())

    /* transaction */
    // from ldu
    val req = Flipped(Valid(new LqWriteBundle))
    // to ldu: mmio, data
    val mmioOut = DecoupledIO(new MemExuOutput)
    val mmioRawData = Output(new LoadDataFromLQBundle)
    // to ldu: nc with data
    val ncOut = DecoupledIO(new LsPipelineBundle)
    // <=> uncache
    val uncache = new UncacheWordIO
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  val req_valid = RegInit(false.B)
  val isNC = RegInit(false.B)
  val req = Reg(new LqWriteBundle)

  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  val uncacheData = Reg(io.uncache.resp.bits.data.cloneType)
  val nderr = RegInit(false.B)

  val writeback = Mux(req.nc, io.ncOut.fire, io.mmioOut.fire)

  /**
   * Flush
   *
   * 1. direct flush during idle
   * 2. otherwise delayed flush until receiving uncache resp
   */
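  // Note: a redirect that arrives while a transaction is in flight does not cancel it.
  // needFlushReg records the pending flush and the entry is only released when the
  // matching uncache response fires, so the uncache channel never sees a dangling request.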
  val needFlushReg = RegInit(false.B)
  val needFlush = req_valid && req.uop.robIdx.needFlush(io.redirect)
  val flush = (needFlush && uncacheState === s_idle) || (io.uncache.resp.fire && needFlushReg)
  when(flush){
    needFlushReg := false.B
  }.elsewhen(needFlush){
    needFlushReg := true.B
  }

  /* enter req */
  when (flush) {
    req_valid := false.B
  } .elsewhen (io.req.valid) {
    req_valid := true.B
    req := io.req.bits
    nderr := false.B
  } .elsewhen (writeback) {
    req_valid := false.B
  }
  XSError(!flush && io.req.valid && req_valid, p"LoadQueueUncache: You can not write a valid entry: $entryIndex")

  /**
   * Memory mapped IO / NC operations
   *
   * States:
   * (1) s_idle: wait for mmio reaching ROB's head / nc req valid from loadunit
   * (2) s_req:  wait to be sent to uncache channel until req selected and uncache ready
   * (3) s_resp: wait for response from uncache channel
   * (4) s_wait: wait for loadunit to receive writeback req
   */
  val pendingld = GatedValidRegNext(io.rob.pendingMMIOld)
  val pendingPtr = GatedRegNext(io.rob.pendingPtr)
  val canSendReq = req_valid && !needFlush && Mux(
    req.nc, true.B,
    pendingld && req.uop.robIdx === pendingPtr
  )
  switch (uncacheState) {
    is (s_idle) {
      when (canSendReq) {
        uncacheState := s_req
      }
    }
    is (s_req) {
      when (io.uncache.req.fire) {
        uncacheState := s_resp
      }
    }
    is (s_resp) {
      when (io.uncache.resp.fire) {
        when (needFlushReg) {
          uncacheState := s_idle
        }.otherwise{
          uncacheState := s_wait
        }
      }
    }
    is (s_wait) {
      when (writeback) {
        uncacheState := s_idle
      }
    }
  }

  /* control */
  io.flush := flush
  io.rob.mmio := DontCare
  io.rob.uop := DontCare
  io.mmioSelect := (uncacheState =/= s_idle) && req.mmio

  /* uncache req */
  io.uncache.req.valid := uncacheState === s_req
  io.uncache.req.bits := DontCare
  io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.uncache.req.bits.data := DontCare
  io.uncache.req.bits.addr := req.paddr
  io.uncache.req.bits.vaddr := req.vaddr
  io.uncache.req.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
  io.uncache.req.bits.id := entryIndex.U
  io.uncache.req.bits.instrtype := DontCare
  io.uncache.req.bits.replayCarry := DontCare
  io.uncache.req.bits.atomic := req.atomic
  io.uncache.req.bits.nc := req.nc
  io.uncache.req.bits.memBackTypeMM := req.memBackTypeMM

  io.uncache.resp.ready := true.B

  /* uncache resp */
  when (io.uncache.resp.fire) {
    uncacheData := io.uncache.resp.bits.data
    nderr := io.uncache.resp.bits.nderr
  }

  /* uncache writeback */
  val selUop = req.uop
  val func = selUop.fuOpType
  val raddr = req.paddr
  val rdataSel = LookupTree(raddr(2, 0), List(
    "b000".U -> uncacheData(63, 0),
    "b001".U -> uncacheData(63, 8),
    "b010".U -> uncacheData(63, 16),
    "b011".U -> uncacheData(63, 24),
    "b100".U -> uncacheData(63, 32),
    "b101".U -> uncacheData(63, 40),
    "b110".U -> uncacheData(63, 48),
    "b111".U -> uncacheData(63, 56)
  ))
  val rdataPartialLoad = rdataHelper(selUop, rdataSel)
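  // rdataSel drops the low bytes of the 64-bit uncache beat according to the byte
  // offset paddr(2, 0); rdataHelper (from HasLoadHelper) then extends the selected
  // bytes per the load's fuOpType, e.g. an lw at byte offset 4 takes uncacheData(63, 32)
  // and sign-extends its low 32 bits.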
  io.mmioOut.valid := false.B
  io.mmioOut.bits := DontCare
  io.mmioRawData := DontCare
  io.ncOut.valid := false.B
  io.ncOut.bits := DontCare

  when(req.nc){
    io.ncOut.valid := (uncacheState === s_wait)
    io.ncOut.bits := DontCare
    io.ncOut.bits.uop := selUop
    io.ncOut.bits.uop.lqIdx := req.uop.lqIdx
    io.ncOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.ncOut.bits.data := rdataPartialLoad
    io.ncOut.bits.paddr := req.paddr
    io.ncOut.bits.vaddr := req.vaddr
    io.ncOut.bits.nc := true.B
    io.ncOut.bits.mask := Mux(req.paddr(3), req.mask(15, 8), req.mask(7, 0))
    io.ncOut.bits.schedIndex := req.schedIndex
    io.ncOut.bits.isvec := req.isvec
    io.ncOut.bits.is128bit := req.is128bit
    io.ncOut.bits.vecActive := req.vecActive
  }.otherwise{
    io.mmioOut.valid := (uncacheState === s_wait)
    io.mmioOut.bits := DontCare
    io.mmioOut.bits.uop := selUop
    io.mmioOut.bits.uop.lqIdx := req.uop.lqIdx
    io.mmioOut.bits.uop.exceptionVec(loadAccessFault) := nderr
    io.mmioOut.bits.data := rdataPartialLoad
    io.mmioOut.bits.debug.isMMIO := true.B
    io.mmioOut.bits.debug.isNC := false.B
    io.mmioOut.bits.debug.paddr := req.paddr
    io.mmioOut.bits.debug.vaddr := req.vaddr
    io.mmioRawData.lqData := uncacheData
    io.mmioRawData.uop := req.uop
    io.mmioRawData.addrOffset := req.paddr
  }

  io.exception.valid := writeback
  io.exception.bits := req
  io.exception.bits.uop.exceptionVec(loadAccessFault) := nderr

  /* debug log */
  XSDebug(io.uncache.req.fire,
    "uncache req: pc %x addr %x data %x op %x mask %x\n",
    req.uop.pc,
    io.uncache.req.bits.addr,
    io.uncache.req.bits.data,
    io.uncache.req.bits.cmd,
    io.uncache.req.bits.mask
  )
  XSInfo(io.ncOut.fire,
    "int load miss write to cbd robidx %d lqidx %d pc 0x%x mmio %x\n",
    io.ncOut.bits.uop.robIdx.asUInt,
    io.ncOut.bits.uop.lqIdx.asUInt,
    io.ncOut.bits.uop.pc,
    true.B
  )
  XSInfo(io.mmioOut.fire,
    "int load miss write to cbd robidx %d lqidx %d pc 0x%x mmio %x\n",
    io.mmioOut.bits.uop.robIdx.asUInt,
    io.mmioOut.bits.uop.lqIdx.asUInt,
    io.mmioOut.bits.uop.pc,
    true.B
  )

}

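/**
 * LoadQueueUncache buffers uncacheable loads (MMIO and NC) coming from load pipeline
 * stage 3. Each slot is an independent UncacheEntry state machine; a FreeList tracks
 * the free slot indices and can hand out up to LoadPipelineWidth of them per cycle.
 */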
class LoadQueueUncache(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
  with HasMemBlockParameters
{
  val io = IO(new Bundle() {
    /* control */
    val redirect = Flipped(Valid(new Redirect))
    // mmio commit
    val rob = Flipped(new RobLsqIO)

    /* transaction */
    // enqueue: from ldu s3
    val req = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle)))
    // writeback: mmio to ldu s0, s3
    val mmioOut = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val mmioRawData = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    // writeback: nc to ldu s0--s3
    val ncOut = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    // <=> uncache
    val uncache = new UncacheWordIO

    /* except */
    // rollback: re-fetch the load when the buffer is full and it cannot enqueue
    val rollback = Output(Valid(new Redirect))
    // exception generated by outer bus
    val exception = Valid(new LqWriteBundle)
  })

  /******************************************************************
   * Structure
   ******************************************************************/
  val entries = Seq.tabulate(LoadUncacheBufferSize)(i => Module(new UncacheEntry(i)))

  val freeList = Module(new FreeList(
    size = LoadUncacheBufferSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueUncache freelist"
  ))
  freeList.io := DontCare

  // set default IO
  entries.foreach {
    case (e) =>
      e.io.req.valid := false.B
      e.io.req.bits := DontCare
      e.io.uncache.req.ready := false.B
      e.io.uncache.resp.valid := false.B
      e.io.uncache.resp.bits := DontCare
      e.io.ncOut.ready := false.B
      e.io.mmioOut.ready := false.B
  }
  io.uncache.req.valid := false.B
  io.uncache.req.bits := DontCare
  io.uncache.resp.ready := false.B
  for (w <- 0 until LoadPipelineWidth) {
    io.mmioOut(w).valid := false.B
    io.mmioOut(w).bits := DontCare
    io.mmioRawData(w) := DontCare
    io.ncOut(w).valid := false.B
    io.ncOut(w).bits := DontCare
  }


  /******************************************************************
   * Enqueue
   *
   * s1: hold
   * s2: confirm enqueue and write entry
   *    valid: no redirect, no exception, no replay, is mmio/nc
   *    ready: freelist can allocate
   ******************************************************************/

  val s1_req = VecInit(io.req.map(_.bits))
  val s1_valid = VecInit(io.req.map(_.valid))
  val s2_enqueue = Wire(Vec(LoadPipelineWidth, Bool()))
  io.req.zipWithIndex.foreach{ case (r, i) =>
    r.ready := !s2_enqueue(i) || freeList.io.canAllocate(i)
  }

  // s2: enqueue
  val s2_req = (0 until LoadPipelineWidth).map(i => {RegEnable(s1_req(i), s1_valid(i))})
  val s2_valid = (0 until LoadPipelineWidth).map(i => {
    RegNext(s1_valid(i)) &&
    !s2_req(i).uop.robIdx.needFlush(RegNext(io.redirect)) &&
    !s2_req(i).uop.robIdx.needFlush(io.redirect)
  })
  val s2_has_exception = s2_req.map(x => ExceptionNO.selectByFu(x.uop.exceptionVec, LduCfg).asUInt.orR)
  val s2_need_replay = s2_req.map(_.rep_info.need_rep)

  for (w <- 0 until LoadPipelineWidth) {
    s2_enqueue(w) := s2_valid(w) && !s2_has_exception(w) && !s2_need_replay(w) && (s2_req(w).mmio || s2_req(w).nc)
  }

  val s2_enqValidVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val s2_enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt()))

  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.allocateReq(w) := true.B
  }
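  // The FreeList is probed on every port each cycle (allocateReq is tied high above);
  // allocation is only committed via doAllocate when s2 confirms the enqueue. Enqueuing
  // loads are compacted: port w uses the PopCount of enqueues on lower-numbered ports
  // as its offset into canAllocate/allocateSlot, so free slots are consumed densely
  // regardless of which pipeline ports carry uncache loads in a given cycle.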
  // freeList real-allocate
  for (w <- 0 until LoadPipelineWidth) {
    freeList.io.doAllocate(w) := s2_enqValidVec(w)
  }

  for (w <- 0 until LoadPipelineWidth) {
    val offset = PopCount(s2_enqueue.take(w))
    s2_enqValidVec(w) := s2_enqueue(w) && freeList.io.canAllocate(offset)
    s2_enqIndexVec(w) := freeList.io.allocateSlot(offset)
  }


  /******************************************************************
   * Uncache Transaction
   *
   * 1. uncache req
   * 2. uncache resp
   * 3. writeback
   ******************************************************************/
  private val NC_WB_MOD = NCWBPorts.length

  val uncacheReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  val mmioSelect = entries.map(e => e.io.mmioSelect).reduce(_ || _)
  val mmioReq = Wire(DecoupledIO(io.uncache.req.bits.cloneType))
  // TODO lyq: It's best to choose in robIdx order / the order in which they enter
  val ncReqArb = Module(new RRArbiterInit(io.uncache.req.bits.cloneType, LoadUncacheBufferSize))

  val mmioOut = Wire(DecoupledIO(io.mmioOut(0).bits.cloneType))
  val mmioRawData = Wire(io.mmioRawData(0).cloneType)
  val ncOut = Wire(chiselTypeOf(io.ncOut))
  val ncOutValidVec = VecInit(entries.map(e => e.io.ncOut.valid))
  val ncOutValidVecRem = SubVec.getMaskRem(ncOutValidVec, NC_WB_MOD)

  // init
  uncacheReq.valid := false.B
  uncacheReq.bits := DontCare
  mmioReq.valid := false.B
  mmioReq.bits := DontCare
  mmioOut.valid := false.B
  mmioOut.bits := DontCare
  mmioRawData := DontCare
  for (i <- 0 until LoadUncacheBufferSize) {
    ncReqArb.io.in(i).valid := false.B
    ncReqArb.io.in(i).bits := DontCare
  }
  for (i <- 0 until LoadPipelineWidth) {
    ncOut(i).valid := false.B
    ncOut(i).bits := DontCare
  }

  entries.zipWithIndex.foreach {
    case (e, i) =>
      // enqueue
      for (w <- 0 until LoadPipelineWidth) {
        when (s2_enqValidVec(w) && (i.U === s2_enqIndexVec(w))) {
          e.io.req.valid := true.B
          e.io.req.bits := s2_req(w)
        }
      }

      // control
      e.io.redirect <> io.redirect
      e.io.rob <> io.rob

      // uncache req, writeback
      when (e.io.mmioSelect) {
        mmioReq.valid := e.io.uncache.req.valid
        mmioReq.bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := mmioReq.ready

        e.io.mmioOut.ready := mmioOut.ready
        mmioOut.valid := e.io.mmioOut.valid
        mmioOut.bits := e.io.mmioOut.bits
        mmioRawData := e.io.mmioRawData

      }.otherwise{
        ncReqArb.io.in(i).valid := e.io.uncache.req.valid
        ncReqArb.io.in(i).bits := e.io.uncache.req.bits
        e.io.uncache.req.ready := ncReqArb.io.in(i).ready

        (0 until NC_WB_MOD).map { w =>
          val (idx, ncOutValid) = PriorityEncoderWithFlag(ncOutValidVecRem(w))
          val port = NCWBPorts(w)
          when((i.U === idx) && ncOutValid) {
            ncOut(port).valid := ncOutValid
            ncOut(port).bits := e.io.ncOut.bits
            e.io.ncOut.ready := ncOut(port).ready
          }
        }

      }

      // uncache resp
      when (i.U === io.uncache.resp.bits.id) {
        e.io.uncache.resp <> io.uncache.resp
      }

  }

  mmioReq.ready := false.B
  ncReqArb.io.out.ready := false.B
  when(mmioSelect){
    uncacheReq <> mmioReq
  }.otherwise{
    uncacheReq <> ncReqArb.io.out
  }

  // uncache Request
  AddPipelineReg(uncacheReq, io.uncache.req, false.B)

  // uncache Writeback
  AddPipelineReg(mmioOut, io.mmioOut(UncacheWBPort), false.B)
  io.mmioRawData(UncacheWBPort) := RegEnable(mmioRawData, mmioOut.fire)

  (0 until LoadPipelineWidth).foreach { i => AddPipelineReg(ncOut(i), io.ncOut(i), false.B) }

  // uncache exception
  io.exception.valid := Cat(entries.map(_.io.exception.valid)).orR
  io.exception.bits := ParallelPriorityMux(entries.map(e =>
    (e.io.exception.valid, e.io.exception.bits)
  ))

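  // MMIO loads may have side effects, so an UncacheEntry only issues an MMIO request
  // once the ROB reports that load as the oldest pending MMIO instruction
  // (io.rob.pendingMMIOld / pendingPtr); NC loads may issue as soon as they win
  // arbitration. The io.rob.mmio/uop assignments below inform the ROB which
  // in-flight loads are MMIO.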
  // rob
  for (i <- 0 until LoadPipelineWidth) {
    io.rob.mmio(i) := RegNext(s1_valid(i) && s1_req(i).mmio)
    io.rob.uop(i) := RegEnable(s1_req(i).uop, s1_valid(i))
  }


  /******************************************************************
   * Deallocate
   ******************************************************************/
  // UncacheBuffer deallocate
  val freeMaskVec = Wire(Vec(LoadUncacheBufferSize, Bool()))

  // init
  freeMaskVec.map(e => e := false.B)

  // dealloc logic
  entries.zipWithIndex.foreach {
    case (e, i) =>
      when ((e.io.mmioSelect && e.io.mmioOut.fire) || e.io.ncOut.fire || e.io.flush) {
        freeMaskVec(i) := true.B
      }
  }

  freeList.io.free := freeMaskVec.asUInt


  /******************************************************************
   * Uncache rollback detection
   *
   * When uncache loads try to enqueue but cannot be allocated an entry, they must be
   * re-executed, so a rollback (redirect) request is generated for them.
   *
   * Cycle 0: uncache enqueue.
   * Cycle 1: Select the oldest of the rejected uncache loads.
   * Cycle 2: Redirect fire.
   *          Choose the oldest load among the LoadPipelineWidth candidates.
   *          Prepare the redirect request according to the detected rejection.
   *          Fire the redirect request (if valid).
   *
   *              Load_S3  ....  Load_S3
   * stage 0:        lq             lq
   *                 |              | (can not enqueue)
   * stage 1:        lq             lq
   *                 |              |
   *                 ---------------
   *                        |
   * stage 2:               lq
   *                        |
   *                  rollback req
   *
   ******************************************************************/
  def selectOldestRedirect(xs: Seq[Valid[Redirect]]): Vec[Bool] = {
    val compareVec = (0 until xs.length).map(i => (0 until i).map(j => isAfter(xs(j).bits.robIdx, xs(i).bits.robIdx)))
    val resultOnehot = VecInit((0 until xs.length).map(i => Cat((0 until xs.length).map(j =>
      (if (j < i) !xs(j).valid || compareVec(i)(j)
      else if (j == i) xs(i).valid
      else !xs(j).valid || !compareVec(j)(i))
    )).andR))
    resultOnehot
  }
  val reqNeedCheck = VecInit((0 until LoadPipelineWidth).map(w =>
    s2_enqueue(w) && !s2_enqValidVec(w)
  ))
  val reqSelUops = VecInit(s2_req.map(_.uop))
  val allRedirect = (0 until LoadPipelineWidth).map(i => {
    val redirect = Wire(Valid(new Redirect))
    redirect.valid := reqNeedCheck(i)
    redirect.bits := DontCare
    redirect.bits.isRVC := reqSelUops(i).preDecodeInfo.isRVC
    redirect.bits.robIdx := reqSelUops(i).robIdx
    redirect.bits.ftqIdx := reqSelUops(i).ftqPtr
    redirect.bits.ftqOffset := reqSelUops(i).ftqOffset
    redirect.bits.level := RedirectLevel.flush
    redirect.bits.cfiUpdate.target := reqSelUops(i).pc // TODO: check if need pc
    redirect.bits.debug_runahead_checkpoint_id := reqSelUops(i).debugInfo.runahead_checkpoint_id
    redirect
  })
  val oldestOneHot = selectOldestRedirect(allRedirect)
  val oldestRedirect = Mux1H(oldestOneHot, allRedirect)
  val lastCycleRedirect = Wire(Valid(new Redirect))
  lastCycleRedirect.valid := RegNext(io.redirect.valid)
  lastCycleRedirect.bits := RegEnable(io.redirect.bits, io.redirect.valid)
  val lastLastCycleRedirect = Wire(Valid(new Redirect))
  lastLastCycleRedirect.valid := RegNext(lastCycleRedirect.valid)
  lastLastCycleRedirect.bits := RegEnable(lastCycleRedirect.bits, lastCycleRedirect.valid)
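  // The rollback is only raised if the offending load has not already been flushed by a
  // redirect in the current cycle or in the previous two cycles (lastCycleRedirect /
  // lastLastCycleRedirect), which covers the latency between rejection in s2 and the
  // registered rollback request below.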
  io.rollback.valid := GatedValidRegNext(oldestRedirect.valid &&
                        !oldestRedirect.bits.robIdx.needFlush(io.redirect) &&
                        !oldestRedirect.bits.robIdx.needFlush(lastCycleRedirect) &&
                        !oldestRedirect.bits.robIdx.needFlush(lastLastCycleRedirect))
  io.rollback.bits := RegEnable(oldestRedirect.bits, oldestRedirect.valid)


  /******************************************************************
   * Perf Counter
   ******************************************************************/
  val validCount = freeList.io.validCount
  val allowEnqueue = !freeList.io.empty
  QueuePerf(LoadUncacheBufferSize, validCount, !allowEnqueue)

  XSPerfAccumulate("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc)
  XSPerfAccumulate("mmio_writeback_success", io.mmioOut(0).fire)
  XSPerfAccumulate("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready)
  XSPerfAccumulate("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc)
  XSPerfAccumulate("nc_writeback_success", io.ncOut(0).fire)
  XSPerfAccumulate("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready)
  XSPerfAccumulate("uncache_full_rollback", io.rollback.valid)

  val perfEvents: Seq[(String, UInt)] = Seq(
    ("mmio_uncache_req", io.uncache.req.fire && !io.uncache.req.bits.nc),
    ("mmio_writeback_success", io.mmioOut(0).fire),
    ("mmio_writeback_blocked", io.mmioOut(0).valid && !io.mmioOut(0).ready),
    ("nc_uncache_req", io.uncache.req.fire && io.uncache.req.bits.nc),
    ("nc_writeback_success", io.ncOut(0).fire),
    ("nc_writeback_blocked", io.ncOut(0).valid && !io.ncOut(0).ready),
    ("uncache_full_rollback", io.rollback.valid)
  )
  // end
}