/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import difftest._
import huancun.{AliasKey, DirtyKey, PreferCacheKey, PrefetchKey}
import huancun.utils.FastArbiter
import mem.{AddPipelineReg}

class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)

  // store
  val full_overwrite = Bool()

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate, it will not be used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //    a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate, it will cancel miss req.valid

  def isLoad = source === LOAD_SOURCE.U
  def isStore = source === STORE_SOURCE.U
  def isAMO = source === AMO_SOURCE.U
  def hit = req_coh.isValid()
}
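
// Added note: consumers treat `req.valid && !req.bits.cancel` as the effective
// valid of a miss req, so a late `cancel` can kill an otherwise-accepted
// request in its final cycle. For example, a miss entry only allocates on
//   io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel
// (see primary_fire in MissEntry below).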

class MissReqStoreData(implicit p: Parameters) extends DCacheBundle {
  // store data and store mask will be written to the miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)
}

class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
  // store data and store mask will be written to the miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  def toMissReqStoreData(): MissReqStoreData = {
    val out = Wire(new MissReqStoreData)
    out.store_data := store_data
    out.store_mask := store_mask
    out
  }

  def toMissReqWoStoreData(): MissReqWoStoreData = {
    val out = Wire(new MissReqWoStoreData)
    out.source := source
    out.cmd := cmd
    out.addr := addr
    out.vaddr := vaddr
    out.way_en := way_en
    out.full_overwrite := full_overwrite
    out.word_idx := word_idx
    out.amo_data := amo_data
    out.amo_mask := amo_mask
    out.req_coh := req_coh
    out.replace_coh := replace_coh
    out.replace_tag := replace_tag
    out.id := id
    out.cancel := cancel
    out
  }
}

class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // MSHR update request, MSHR state and addr will be updated when req.fire()
    val req = Flipped(ValidIO(new MissReqWoStoreData))
    // store data and mask will be written to the miss queue entry 1 cycle after req.fire()
    val req_data = Input(new MissReqStoreData)
    // allocate this entry for a new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it cannot merge the new req
    val secondary_reject = Output(Bool())

    val refill_to_ldq = ValidIO(new Refill)

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_resp = Input(Bool())

    // replace pipe
    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReqWoStoreData)
  val req_store_mask = Reg(UInt(cfg.blockBytes.W))
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)
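
  // Naming convention used below (added note): `s_*` flags track actions this
  // entry still has to send (false = pending, true = sent or not needed);
  // `w_*` flags track responses still being awaited. All default to true, are
  // pulled low on allocation as needed, and the entry is released once the
  // relevant flags are all true again:
  //   release_entry = s_grantack && w_refill_resp && w_mainpipe_resp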
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)
  val s_write_storedata = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && w_refill_resp && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  // val full_overwrite = req.isStore && req_store_mask.andR
  val full_overwrite = Reg(Bool())

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  // refill data with store data, this reg will be used to store:
  // 1. store data (if needed), before l2 refills data
  // 2. store data and l2 refill data merged result (i.e. the new cache line
  //    that will be written to the data array)
  val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  // raw data refilled to l1 by l2
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))

  // allocate current miss queue entry for a miss req
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  // merge miss req into current miss queue entry
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  when (!s_write_storedata && req_valid) {
    // store data will be written to the miss queue entry 1 cycle after req.fire()
    s_write_storedata := true.B
    assert(RegNext(primary_fire || secondary_fire))
  }

  when (primary_fire) {
    req_valid := true.B
    req := io.req.bits
    req.addr := get_block_addr(io.req.bits.addr)

    s_acquire := false.B
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    s_write_storedata := !io.req.bits.isStore // only stores need to wait for data
    full_overwrite := io.req.bits.isStore && io.req.bits.full_overwrite

    when (!io.req.bits.isAMO) {
      s_refill := false.B
      w_refill_resp := false.B
    }

    when (!io.req.bits.hit && io.req.bits.replace_coh.isValid() && !io.req.bits.isAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (io.req.bits.isAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := io.req.bits.isLoad
    error := false.B
  }

  when (secondary_fire) {
    assert(io.req.bits.req_coh.state <= req.req_coh.state)
    assert(!(io.req.bits.isAMO || req.isAMO))
    // use the most up-to-date meta
    req.req_coh := io.req.bits.req_coh

    when (io.req.bits.isStore) {
      req := io.req.bits
      req.addr := get_block_addr(io.req.bits.addr)
      // keep the original replacement decision
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
      s_write_storedata := false.B // only stores need to wait for data
      full_overwrite := io.req.bits.isStore && io.req.bits.full_overwrite
    }

    should_refill_data := should_refill_data_reg || io.req.bits.isLoad
    should_refill_data_reg := should_refill_data
  }

  when (io.mem_acquire.fire()) {
    s_acquire := true.B
  }
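
  // The next two sections buffer store data into refill_and_store_data and
  // then merge it with refill beats from L2. mergePutData (defined below) is a
  // per-byte select; a worked example with made-up values:
  //   old_data = 0x1122334455667788, new_data = 0xAABBCCDDEEFF0011, wmask = 0x0F
  //   full_wmask = FillInterleaved(8, wmask) = 0x00000000FFFFFFFF
  //   result     = (~full_wmask & old_data) | (full_wmask & new_data)
  //              = 0x11223344EEFF0011 (upper half from old, lower half from new)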
  // store data and mask write
  when (!s_write_storedata && req_valid) {
    req_store_mask := io.req_data.store_mask
    for (i <- 0 until blockRows) {
      refill_and_store_data(i) := io.req_data.store_data(rowBits * (i + 1) - 1, rowBits * i)
    }
  }

  // merge data refilled by l2 and store data, update miss queue entry, gen refill_req
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  // merge refilled data and store data (if needed)
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
  for (i <- 0 until blockRows) {
    // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    new_data(i) := refill_and_store_data(i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }

  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire()) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire()) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire()) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire()) {
    s_refill := true.B
  }

  when (io.refill_pipe_resp) {
    w_refill_resp := true.B
  }

  when (io.main_pipe_req.fire()) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

  def before_read_sent_can_merge(new_req: MissReqWoStoreData): Bool = {
    acquire_not_sent && req.isLoad && (new_req.isLoad || new_req.isStore)
  }

  def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = {
    data_not_refilled && (req.isLoad || req.isStore) && new_req.isLoad
  }

  def should_merge(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match &&
    (
      before_read_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req)
    )
  }

  // a store can be merged before io.mem_acquire.fire()
  // a store cannot be merged in the same cycle that io.mem_acquire.fire()
  // a load can be merged before io.mem_grant.fire()
  //
  // TODO: merge store if possible? mem_acquire may need to be re-issued,
  // but the sbuffer entry can be freed
  def should_reject(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        !before_read_sent_can_merge(new_req) &&
          !before_data_refill_can_merge(new_req),
        set_match && new_req.way_en === req.way_en
      )
  }
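
  // Added note on the reject rule above: when the new req is a *different*
  // block that maps to the same set and the same victim way as this entry,
  // it must be rejected (and retried later); otherwise two entries could
  // refill into the same way concurrently. A hypothetical trace: this entry
  // holds block A in (set 5, way 2); a miss on block B with the same set and
  // way_en must wait until this entry releases.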

  io.primary_ready := !req_valid
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_and_store_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire()) && should_refill_data_reg
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_splited(RegNext(refill_count))
  io.refill_to_ldq.bits.error := RegNext(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied)
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire())
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt

  io.mem_acquire.valid := !s_acquire
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := true.B)
  // prefer not to cache data in L2 by default
  io.mem_acquire.bits.user.lift(PreferCacheKey).foreach(_ := false.B)
  require(nSets <= 256)

  io.mem_grant.ready := !w_grantlast && s_acquire
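
  // Added note: when a store fully overwrites the line (full_overwrite), only
  // write permission is requested via AcquirePerm, so L2 may answer with a
  // data-less Grant; otherwise AcquireBlock fetches the whole line. This is
  // why the data-less branch of the grant handler asserts full_overwrite.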

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  assert(RegNext(!io.mem_grant.fire() || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace := DontCare
  replace.miss := false.B
  replace.miss_id := io.id
  replace.miss_dirty := false.B
  replace.probe := false.B
  replace.probe_need_data := false.B
  replace.source := LOAD_SOURCE.U
  replace.vaddr := req.vaddr // only untag bits are needed
  replace.addr := Cat(req.replace_tag, 0.U(pgUntagBits.W)) // only tag bits are needed
  replace.store_mask := 0.U
  replace.replace := true.B
  replace.replace_way_en := req.way_en
  replace.error := false.B

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.addr := req.addr
  refill.way_en := req.way_en
  // write the whole line when the grant carried data or a load needs it,
  // otherwise only the banks actually touched by the store
  refill.wmask := Mux(
    hasData || req.isLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req_store_mask).orR)).asUInt
  )
  refill.data := refill_and_store_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B) -> Branch,
      Cat(rd, toB, true.B)  -> Branch,
      Cat(rd, toT, false.B) -> Trunk,
      Cat(rd, toT, true.B)  -> Dirty,
      Cat(wi, toT, false.B) -> Trunk,
      Cat(wi, toT, true.B)  -> Dirty,
      Cat(wr, toT, false.B) -> Dirty,
      Cat(wr, toT, true.B)  -> Dirty))
  }
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.error := error
  refill.alias := req.vaddr(13, 12) // TODO
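
  // Added note on missCohGen: the next coherence state follows the granted
  // permission (toB -> Branch, toT -> Trunk), promoted to Dirty when the
  // DirtyKey echo says L2's copy was dirty or when the access itself is a
  // write (wr). E.g. a load (rd) granted toT with dirty = false ends in Trunk,
  // while a store (wr) with the same grant ends in Dirty.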

  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.miss_way_en := req.way_en
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_and_store_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.error := error
  io.main_pipe_req.bits.id := req.id

  io.block_addr.valid := req_valid && w_grantlast && !w_refill_resp
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire())
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire(), io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !w_refill_resp && w_grantlast)
  XSPerfAccumulate("soft_prefetch_number", primary_fire && io.req.bits.source === SOFT_PREFETCH.U)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(primary_fire), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(io.mem_acquire.fire(), io.mem_grant.fire() && refill_done)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}
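
// Entry lifecycle, summarized (added note; typical non-AMO miss):
//   primary_fire -> mem_acquire (A) -> mem_grant beats (D) -> mem_finish (E),
//   with replace_pipe_req/resp to evict the victim and refill_pipe_req/resp
//   to write the new line; the entry is released when release_entry is true.
//   AMO misses skip the refill pipe and go through main_pipe_req/resp instead.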

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new Bundle {
    val hartId = Input(UInt(8.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_req_dup = Vec(nDupStatus, DecoupledIO(new RefillPipeReqCtrl))
    val refill_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counters
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but hasn't received Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val req_data_gen = io.req.bits.toMissReqStoreData()
  val req_data_buffer = RegEnable(req_data_gen, io.req.valid)

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = Cat(secondary_ready_vec).orR
  val reject = Cat(secondary_reject_vec).orR
  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  val accept = alloc || merge

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
  // assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req while another mshr wants
  // to reject it. That is, the incoming req has the same paddr as mshr_0
  // (merge), and the same set and the same way as mshr_1 (reject).
  // In this situation, the incoming req should be merged by mshr_0.
  // assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

  entries.zipWithIndex.foreach {
    case (e, i) =>
      // allocate into the lowest-indexed free entry
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.id := i.U
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits.toMissReqWoStoreData()
      e.io.req_data := req_data_buffer

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      e.io.refill_pipe_resp := io.refill_pipe_resp.valid && io.refill_pipe_resp.bits === i.U
      e.io.replace_pipe_resp := io.replace_pipe_resp.valid && io.replace_pipe_resp.bits === i.U
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      io.debug_early_replace(i) := e.io.debug_early_replace
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  // arbiter_with_pipereg_N_dup(entries.map(_.io.refill_pipe_req), io.refill_pipe_req,
  //   io.refill_pipe_req_dup,
  //   Some("refill_pipe_req"))
  val out_refill_pipe_req = Wire(Decoupled(new RefillPipeReq))
  val out_refill_pipe_req_ctrl = Wire(Decoupled(new RefillPipeReqCtrl))
  out_refill_pipe_req_ctrl.valid := out_refill_pipe_req.valid
  out_refill_pipe_req_ctrl.bits := out_refill_pipe_req.bits.getCtrl
  out_refill_pipe_req.ready := out_refill_pipe_req_ctrl.ready
  arbiter(entries.map(_.io.refill_pipe_req), out_refill_pipe_req, Some("refill_pipe_req"))
  for (dup <- io.refill_pipe_req_dup) {
    AddPipelineReg(out_refill_pipe_req_ctrl, dup, false.B)
  }
  AddPipelineReg(out_refill_pipe_req, io.refill_pipe_req, false.B)

  arbiter_with_pipereg(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))

  fastArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR
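
  // Added note: the assignment above is overridden below by
  // `io.full := num_valids === cfg.nMissEntries.U`; under Chisel's
  // last-connect semantics only the later connection takes effect, so io.full
  // ultimately means "all entries in use" rather than "at least one entry in
  // use" as the expression above would compute.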

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestRefillEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.cacheid := 1.U
    difftest.io.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.io.addr := io.refill_to_ldq.bits.addr
    difftest.io.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.io.data)
  }

  XSPerfAccumulate("miss_req", io.req.fire())
  XSPerfAccumulate("miss_req_allocate", io.req.fire() && alloc)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire() && merge && io.req.bits.isLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && reject && io.req.bits.isLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready))))
  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire()),
    ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
}