/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.{ClientMetadata, ClientStates, TLPermissions}
import utils._
import xiangshan.L1CacheErrorInfo

/** Multiplexed request entering the DCache main pipeline.
  *
  * One bundle carries all request flavours arbitrated into the pipe:
  * miss refill (AMO miss only, per the comment below), probe, store,
  * AMO and replace. Which fields are meaningful depends on the flags
  * (`miss` / `probe` / `replace`) and on `source`.
  */
class MainPipeReq(implicit p: Parameters) extends DCacheBundle {
  val miss = Bool() // only amo miss will refill in main pipe
  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  val miss_param = UInt(TLPermissions.bdWidth.W)   // TileLink permission-transition param for the refill grant
  val miss_dirty = Bool()
  val miss_way_en = UInt(DCacheWays.W)             // one-hot way selected by the miss queue for the refill

  val probe = Bool()
  val probe_param = UInt(TLPermissions.bdWidth.W)  // TileLink shrink param requested by the probe
  val probe_need_data = Bool()

  // request info
  // reqs from Store, AMO use this
  // probe does not use this
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  // if dcache size > 32KB, vaddr is also needed for store
  // vaddr is used to get extra index bits
  val vaddr = UInt(VAddrBits.W)
  // must be aligned to block
  val addr = UInt(PAddrBits.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does amo work on?
  val word_idx = UInt(log2Up(cfg.blockBytes * 8 / DataBits).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  // error
  val error = Bool()

  // replace
  val replace = Bool()
  val replace_way_en = UInt(DCacheWays.W)

  val id = UInt(reqIdWidth.W)

  def isLoad: Bool = source === LOAD_SOURCE.U
  def isStore: Bool = source === STORE_SOURCE.U
  def isAMO: Bool = source === AMO_SOURCE.U

  /** Build a MainPipeReq from a sbuffer store request.
    *
    * All flags that would steer the request down a non-store path
    * (miss / probe / replace / error) are cleared; everything not
    * explicitly assigned is DontCare.
    */
  def convertStoreReq(store: DCacheLineReq): MainPipeReq = {
    val req = Wire(new MainPipeReq)
    req := DontCare
    req.miss := false.B
    req.miss_dirty := false.B
    req.probe := false.B
    req.probe_need_data := false.B
    req.source := STORE_SOURCE.U
    req.cmd := store.cmd
    req.addr := store.addr
    req.vaddr := store.vaddr
    req.store_data := store.data
    req.store_mask := store.mask
    req.replace := false.B
    req.error := false.B
    req.id := store.id
    req
  }
}

/** DCache main pipeline.
  *
  * A 4-stage pipeline (s0..s3) that serves probe, replace, store and
  * atomic requests (arbitrated in s0 in that priority order):
  *  - s0: arbitrate a request, issue meta/tag array reads
  *  - s1: tag compare, pick hit/replace way and coherence state
  *  - s2: select/merge data; store misses exit here to the miss queue
  *  - s3: perform AMO ALU op, write data/meta/tag, issue writebacks
  *
  * Same-set requests are serialized: a new s0 request is blocked while
  * any of s1/s2/s3 holds a request to the same set (set_conflict).
  */
class MainPipe(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new Bundle() {
    // probe queue
    val probe_req = Flipped(DecoupledIO(new MainPipeReq))
    // store miss go to miss queue
    val miss_req = DecoupledIO(new MissReq)
    // store buffer
    val store_req = Flipped(DecoupledIO(new DCacheLineReq))
    val store_replay_resp = ValidIO(new DCacheLineResp)
    val store_hit_resp = ValidIO(new DCacheLineResp)
    val release_update = ValidIO(new ReleaseUpdate)
    // atomics
    val atomic_req = Flipped(DecoupledIO(new MainPipeReq))
    val atomic_resp = ValidIO(new AtomicsResp)
    // replace
    val replace_req = Flipped(DecoupledIO(new MainPipeReq))
    val replace_resp = ValidIO(UInt(log2Up(cfg.nMissEntries).W))
    // write-back queue
    val wb = DecoupledIO(new WritebackReq)

    val data_read = DecoupledIO(new L1BankedDataReadLineReq)
    val data_resp = Input(Vec(DCacheBanks, new L1BankedDataReadResult()))
    val readline_error = Input(Bool())
    val data_write = DecoupledIO(new L1BankedDataWriteReq)

    val meta_read = DecoupledIO(new MetaReadReq)
    val meta_resp = Input(Vec(nWays, new Meta))
    val meta_write = DecoupledIO(new MetaWriteReq)
    val error_flag_resp = Input(Vec(nWays, Bool()))
    val error_flag_write = DecoupledIO(new ErrorWriteReq)

    val tag_read = DecoupledIO(new TagReadReq)
    val tag_resp = Input(Vec(nWays, UInt(encTagBits.W)))
    val tag_write = DecoupledIO(new TagWriteReq)

    // update state vec in replacement algo
    val replace_access = ValidIO(new ReplacementAccessBundle)
    // find the way to be replaced
    val replace_way = new ReplacementWayReqIO

    // per-stage set/way occupancy, exported so other units can detect conflicts
    val status = new Bundle() {
      val s0_set = ValidIO(UInt(idxBits.W))
      val s1, s2, s3 = ValidIO(new Bundle() {
        val set = UInt(idxBits.W)
        val way_en = UInt(nWays.W)
      })
    }

    // lrsc locked block should block probe
    val lrsc_locked_block = Output(Valid(UInt(PAddrBits.W)))
    val invalid_resv_set = Input(Bool())
    val update_resv_set = Output(Bool())
    val block_lr = Output(Bool())

    // ecc error
    val error = Output(new L1CacheErrorInfo())
  })

  // meta array is made of regs, so meta write or read should always be ready
  assert(RegNext(io.meta_read.ready))
  assert(RegNext(io.meta_write.ready))

  // set-conflict tracking: s0 must not issue into a set already occupied
  // by an in-flight request in s1/s2/s3
  val s1_s0_set_conflict, s2_s0_set_conlict, s3_s0_set_conflict = Wire(Bool())
  val set_conflict = s1_s0_set_conflict || s2_s0_set_conlict || s3_s0_set_conflict
  // check sbuffer store req set_conflict in parallel with req arbiter
  // it will speed up the generation of store_req.ready, which is in crit. path
  val s1_s0_set_conflict_store, s2_s0_set_conlict_store, s3_s0_set_conflict_store = Wire(Bool())
  val store_set_conflict = s1_s0_set_conflict_store || s2_s0_set_conlict_store || s3_s0_set_conflict_store
  val s1_ready, s2_ready, s3_ready = Wire(Bool())

  // convert store req to main pipe req, and select a req from store and probe
  val store_req = Wire(DecoupledIO(new MainPipeReq))
  store_req.bits := (new MainPipeReq).convertStoreReq(io.store_req.bits)
  store_req.valid := io.store_req.valid
  io.store_req.ready := store_req.ready

  // s0: read meta and tag
  val req = Wire(DecoupledIO(new MainPipeReq))
  arbiter(
    in = Seq(
      io.probe_req,
      io.replace_req,
      store_req, // Note: store_req.ready is now manually assigned for better timing
      io.atomic_req
    ),
    out = req,
    name = Some("main_pipe_req")
  )

  val store_idx = get_idx(io.store_req.bits.vaddr)
  // manually assign store_req.ready for better timing
  // now store_req set conflict check is done in parallel with req arbiter
  store_req.ready := io.meta_read.ready && io.tag_read.ready && s1_ready && !store_set_conflict &&
    !io.probe_req.valid && !io.replace_req.valid
  val s0_req = req.bits
  val s0_idx = get_idx(s0_req.vaddr)
  val s0_need_tag = io.tag_read.valid
  val s0_can_go = io.meta_read.ready && io.tag_read.ready && s1_ready && !set_conflict
  val s0_fire = req.valid && s0_can_go

  // per-bank store write coverage: bank_write marks banks touched at all,
  // bank_full_write marks banks fully overwritten (no read-modify-write needed)
  val bank_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).orR)).asUInt
  val bank_full_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).andR)).asUInt
  val banks_full_overwrite = bank_full_write.andR

  // stores only need to read banks that are partially written
  val banked_store_rmask = bank_write & ~bank_full_write
  val banked_full_rmask = ~0.U(DCacheBanks.W)
  val banked_none_rmask = 0.U(DCacheBanks.W)

  val store_need_data = !s0_req.probe && s0_req.isStore && banked_store_rmask.orR
  val probe_need_data = s0_req.probe
  val amo_need_data = !s0_req.probe && s0_req.isAMO
  val miss_need_data = s0_req.miss
  val replace_need_data = s0_req.replace

  val banked_need_data = store_need_data || probe_need_data || amo_need_data || miss_need_data || replace_need_data

  val s0_banked_rmask = Mux(store_need_data, banked_store_rmask,
    Mux(probe_need_data || amo_need_data || miss_need_data || replace_need_data,
      banked_full_rmask,
      banked_none_rmask
    ))

  // generate wmask here and use it in stage 2
  val banked_store_wmask = bank_write
  val banked_full_wmask = ~0.U(DCacheBanks.W)
  val banked_none_wmask = 0.U(DCacheBanks.W)

  // s1: read data
  val s1_valid = RegInit(false.B)
  val s1_need_data = RegEnable(banked_need_data, s0_fire)
  val s1_req = RegEnable(s0_req, s0_fire)
  val s1_banked_rmask = RegEnable(s0_banked_rmask, s0_fire)
  val s1_banked_store_wmask = RegEnable(banked_store_wmask, s0_fire)
  val s1_need_tag = RegEnable(s0_need_tag, s0_fire)
  val s1_can_go = s2_ready && (io.data_read.ready || !s1_need_data)
  val s1_fire = s1_valid && s1_can_go
  val s1_idx = get_idx(s1_req.vaddr)
  when (s0_fire) {
    s1_valid := true.B
  }.elsewhen (s1_fire) {
    s1_valid := false.B
  }
  s1_ready := !s1_valid || s1_can_go
  s1_s0_set_conflict := s1_valid && s0_idx === s1_idx
  s1_s0_set_conflict_store := s1_valid && store_idx === s1_idx

  // latch array responses: the arrays respond one cycle after the read fired,
  // so capture on RegNext(s0_fire) and hold the value afterwards
  val meta_resp = Wire(Vec(nWays, (new Meta).asUInt()))
  val tag_resp = Wire(Vec(nWays, UInt(tagBits.W)))
  val ecc_resp = Wire(Vec(nWays, UInt(eccTagBits.W)))
  meta_resp := Mux(RegNext(s0_fire), VecInit(io.meta_resp.map(_.asUInt)), RegNext(meta_resp))
  tag_resp := Mux(RegNext(s0_fire), VecInit(io.tag_resp.map(r => r(tagBits - 1, 0))), RegNext(tag_resp))
  ecc_resp := Mux(RegNext(s0_fire), VecInit(io.tag_resp.map(r => r(encTagBits - 1, tagBits))), RegNext(ecc_resp))
  val enc_tag_resp = Wire(io.tag_resp.cloneType)
  enc_tag_resp := Mux(RegNext(s0_fire), io.tag_resp, RegNext(enc_tag_resp))

  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
  // a way "matches" only if the tag is equal AND the meta says the line is valid
  val s1_tag_eq_way = wayMap((w: Int) => tag_resp(w) === get_tag(s1_req.addr)).asUInt
  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && Meta(meta_resp(w)).coh.isValid()).asUInt
  val s1_tag_match = s1_tag_match_way.orR

  val s1_hit_tag = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => tag_resp(w))), get_tag(s1_req.addr))
  val s1_hit_coh = ClientMetadata(Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => meta_resp(w))), 0.U))
  val s1_encTag = Mux1H(s1_tag_match_way, wayMap((w: Int) => enc_tag_resp(w)))
  val s1_flag_error = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => io.error_flag_resp(w))), false.B)
  val s1_tag_error = dcacheParameters.tagCode.decode(s1_encTag).error && s1_need_tag
  val s1_l2_error = s1_req.error

  // replacement policy
  // like the array responses, the replacer's answer is captured the cycle
  // after s0 fired and held until the next request
  val s1_repl_way_en = WireInit(0.U(nWays.W))
  s1_repl_way_en := Mux(RegNext(s0_fire), UIntToOH(io.replace_way.way), RegNext(s1_repl_way_en))
  val s1_repl_tag = Mux1H(s1_repl_way_en, wayMap(w => tag_resp(w)))
  val s1_repl_coh = Mux1H(s1_repl_way_en, wayMap(w => meta_resp(w))).asTypeOf(new ClientMetadata)
  val s1_miss_tag = Mux1H(s1_req.miss_way_en, wayMap(w => tag_resp(w)))
  val s1_miss_coh = Mux1H(s1_req.miss_way_en, wayMap(w => meta_resp(w))).asTypeOf(new ClientMetadata)

  val s1_repl_way_raw = WireInit(0.U(log2Up(nWays).W))
  s1_repl_way_raw := Mux(RegNext(s0_fire), io.replace_way.way, RegNext(s1_repl_way_raw))

  // way selection priority: replace req > miss refill > replacement victim > hit way
  val s1_need_replacement = (s1_req.miss || s1_req.isStore && !s1_req.probe) && !s1_tag_match
  val s1_way_en = Mux(
    s1_req.replace,
    s1_req.replace_way_en,
    Mux(
      s1_req.miss,
      s1_req.miss_way_en,
      Mux(
        s1_need_replacement,
        s1_repl_way_en,
        s1_tag_match_way
      )
    )
  )
  assert(!RegNext(s1_fire && PopCount(s1_way_en) > 1.U))
  val s1_tag = Mux(
    s1_req.replace,
    get_tag(s1_req.addr),
    Mux(
      s1_req.miss,
      s1_miss_tag,
      Mux(s1_need_replacement, s1_repl_tag, s1_hit_tag)
    )
  )
  val s1_coh = Mux(
    s1_req.replace,
    Mux1H(s1_req.replace_way_en, meta_resp.map(ClientMetadata(_))),
    Mux(
      s1_req.miss,
      s1_miss_coh,
      Mux(s1_need_replacement, s1_repl_coh, s1_hit_coh)
    )
  )

  val s1_has_permission = s1_hit_coh.onAccess(s1_req.cmd)._1
  val s1_hit = s1_tag_match && s1_has_permission
  // pre-computed in s1 for timing: a store/AMO that misses (and is not a
  // refill/probe/replace) will divert to the miss queue in s2
  val s1_pregen_can_go_to_mq = !s1_req.replace && !s1_req.probe && !s1_req.miss && (s1_req.isStore || s1_req.isAMO) && !s1_hit

  // s2: select data, return resp if this is a store miss
  val s2_valid = RegInit(false.B)
  val s2_req = RegEnable(s1_req, s1_fire)
  val s2_tag_match = RegEnable(s1_tag_match, s1_fire)
  val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_fire)
  val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire)
  val (s2_has_permission, _, s2_new_hit_coh) = s2_hit_coh.onAccess(s2_req.cmd)

  val s2_repl_tag = RegEnable(s1_repl_tag, s1_fire)
  val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire)
  val s2_repl_way_en = RegEnable(s1_repl_way_en, s1_fire)
  val s2_need_replacement = RegEnable(s1_need_replacement, s1_fire)
  val s2_need_data = RegEnable(s1_need_data, s1_fire)
  val s2_idx = get_idx(s2_req.vaddr)
  val s2_way_en = RegEnable(s1_way_en, s1_fire)
  val s2_tag = RegEnable(s1_tag, s1_fire)
  val s2_coh = RegEnable(s1_coh, s1_fire)
  val s2_banked_store_wmask = RegEnable(s1_banked_store_wmask, s1_fire)
  val s2_flag_error = RegEnable(s1_flag_error, s1_fire)
  val s2_tag_error = RegEnable(s1_tag_error, s1_fire)
  val s2_l2_error = s2_req.error
  // s2_data_error will be reported by data array
  val s2_data_error = io.readline_error && s2_need_data && s2_coh.state =/= ClientStates.Nothing
  val s2_error = s2_flag_error || s2_tag_error || s2_data_error || s2_l2_error

  val s2_hit = s2_tag_match && s2_has_permission
  val s2_amo_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isAMO
  val s2_store_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isStore

  s2_s0_set_conlict := s2_valid && s0_idx === s2_idx
  s2_s0_set_conlict_store := s2_valid && store_idx === s2_idx

  // For a store req, it either hits and goes to s3, or miss and enter miss queue immediately
  val s2_can_go_to_s3 = (s2_req.replace || s2_req.probe || s2_req.miss || (s2_req.isStore || s2_req.isAMO) && s2_hit) && s3_ready
  val s2_can_go_to_mq = RegEnable(s1_pregen_can_go_to_mq, s1_fire)
  assert(RegNext(!(s2_valid && s2_can_go_to_s3 && s2_can_go_to_mq)))
  val s2_can_go = s2_can_go_to_s3 || s2_can_go_to_mq
  val s2_fire = s2_valid && s2_can_go
  val s2_fire_to_s3 = s2_valid && s2_can_go_to_s3
  when (s1_fire) {
    s2_valid := true.B
  }.elsewhen (s2_fire) {
    s2_valid := false.B
  }
  s2_ready := !s2_valid || s2_can_go
  // a store miss must replay (back to sbuffer) if the miss queue has no room
  val replay = !io.miss_req.ready

  // data array responds one cycle after the s1 read fired; capture and hold
  val data_resp = Wire(io.data_resp.cloneType)
  data_resp := Mux(RegNext(s1_fire), io.data_resp, RegNext(data_resp))
  val s2_store_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))

  /** Byte-granular merge: keep old_data where wmask is 0, take new_data where 1. */
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    ((~full_wmask & old_data) | (full_wmask & new_data))
  }

  val s2_data = WireInit(VecInit((0 until DCacheBanks).map(i => {
    data_resp(i).raw_data
  })))

  for (i <- 0 until DCacheBanks) {
    val old_data = s2_data(i)
    val new_data = get_data_of_bank(i, s2_req.store_data)
    // for amo hit, we should use read out SRAM data
    // do not merge with store data
    val wmask = Mux(s2_amo_hit, 0.U(wordBytes.W), get_mask_of_bank(i, s2_req.store_mask))
    s2_store_data_merged(i) := mergePutData(old_data, new_data, wmask)
  }

  val s2_data_word = s2_store_data_merged(s2_req.word_idx)

  // s3: write data, meta and tag
  val s3_valid = RegInit(false.B)
  val s3_req = RegEnable(s2_req, s2_fire_to_s3)
  val s3_idx = get_idx(s3_req.vaddr)
  val s3_tag = RegEnable(s2_tag, s2_fire_to_s3)
  val s3_tag_match = RegEnable(s2_tag_match, s2_fire_to_s3)
  val s3_coh = RegEnable(s2_coh, s2_fire_to_s3)
  val s3_hit = RegEnable(s2_hit, s2_fire_to_s3)
  val s3_amo_hit = RegEnable(s2_amo_hit, s2_fire_to_s3)
  val s3_store_hit = RegEnable(s2_store_hit, s2_fire_to_s3)
  val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire_to_s3)
  val s3_new_hit_coh = RegEnable(s2_new_hit_coh, s2_fire_to_s3)
  val s3_way_en = RegEnable(s2_way_en, s2_fire_to_s3)
  val s3_banked_store_wmask = RegEnable(s2_banked_store_wmask, s2_fire_to_s3)
  val s3_store_data_merged = RegEnable(s2_store_data_merged, s2_fire_to_s3)
  val s3_data_word = RegEnable(s2_data_word, s2_fire_to_s3)
  val s3_data = RegEnable(s2_data, s2_fire_to_s3)
  val s3_l2_error = s3_req.error
  val s3_error = RegEnable(s2_error, s2_fire_to_s3)
  val (probe_has_dirty_data, probe_shrink_param, probe_new_coh) = s3_coh.onProbe(s3_req.probe_param)
  val s3_need_replacement = RegEnable(s2_need_replacement, s2_fire_to_s3)

  // which request kinds need a meta (coherence state) update
  val miss_update_meta = s3_req.miss
  val probe_update_meta = s3_req.probe && s3_tag_match && s3_coh =/= probe_new_coh
  val store_update_meta = s3_req.isStore && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
  val amo_update_meta = s3_req.isAMO && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
  // every AMO except LR/SC must wait one extra cycle for the AMO ALU result
  val amo_wait_amoalu = s3_req.isAMO && s3_req.cmd =/= M_XLR && s3_req.cmd =/= M_XSC
  val update_meta = (miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta) && !s3_req.replace

  /** Compute the new coherence state after a miss refill, from the memory-op
    * category, the granted TileLink permission, and whether the grant was dirty. */
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B) -> Branch,
      Cat(rd, toB, true.B) -> Branch,
      Cat(rd, toT, false.B) -> Trunk,
      Cat(rd, toT, true.B) -> Dirty,
      Cat(wi, toT, false.B) -> Trunk,
      Cat(wi, toT, true.B) -> Dirty,
      Cat(wr, toT, false.B) -> Dirty,
      Cat(wr, toT, true.B) -> Dirty))
  }
  val miss_new_coh = ClientMetadata(missCohGen(s3_req.cmd, s3_req.miss_param, s3_req.miss_dirty))

  val new_coh = Mux(
    miss_update_meta,
    miss_new_coh,
    Mux(
      probe_update_meta,
      probe_new_coh,
      Mux(
        store_update_meta || amo_update_meta,
        s3_new_hit_coh,
        ClientMetadata.onReset
      )
    )
  )

  // LR, SC and AMO
  val debug_sc_fail_addr = RegInit(0.U)
  val debug_sc_fail_cnt = RegInit(0.U(8.W))

  // LR reservation: valid while the count stays above the back-off threshold;
  // the counter decays every cycle so a stale reservation eventually expires
  val lrsc_count = RegInit(0.U(log2Ceil(LRSCCycles).W))
  val lrsc_valid = lrsc_count > LRSCBackOff.U
  val lrsc_addr = Reg(UInt())
  val s3_lr = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XLR
  val s3_sc = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XSC
  val s3_lrsc_addr_match = lrsc_valid && lrsc_addr === get_block_addr(s3_req.addr)
  val s3_sc_fail = s3_sc && !s3_lrsc_addr_match
  val s3_sc_resp = Mux(s3_sc_fail, 1.U, 0.U)

  val s3_can_do_amo = (s3_req.miss && !s3_req.probe && s3_req.source === AMO_SOURCE.U) || s3_amo_hit
  val s3_can_do_amo_write = s3_can_do_amo && isWrite(s3_req.cmd) && !s3_sc_fail

  when (s3_valid && (s3_lr || s3_sc)) {
    when (s3_can_do_amo && s3_lr) {
      lrsc_count := (LRSCCycles - 1).U
      lrsc_addr := get_block_addr(s3_req.addr)
    } .otherwise {
      // any SC (success or fail) or an LR that cannot proceed clears the reservation
      lrsc_count := 0.U
    }
  } .elsewhen (lrsc_count > 0.U) {
    lrsc_count := lrsc_count - 1.U
  }

  io.lrsc_locked_block.valid := lrsc_valid
  io.lrsc_locked_block.bits := lrsc_addr
  io.block_lr := RegNext(lrsc_count > 0.U)

  // When we update update_resv_set, block all probe req in the next cycle
  // It should give Probe reservation set addr compare an independent cycle,
  // which will lead to better timing
  io.update_resv_set := s3_valid && s3_lr && s3_can_do_amo

  // when we release this block,
  // we invalidate this reservation set
  when (io.invalid_resv_set) {
    lrsc_count := 0.U
  }

  // debug-only tracking of repeated SC failures to the same address
  when (s3_valid) {
    when (s3_req.addr === debug_sc_fail_addr) {
      when (s3_sc_fail) {
        debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
      } .elsewhen (s3_sc) {
        debug_sc_fail_cnt := 0.U
      }
    } .otherwise {
      when (s3_sc_fail) {
        debug_sc_fail_addr := s3_req.addr
        debug_sc_fail_cnt := 1.U
        // NOTE(review): s3_sc_fail is a Bool, so `s3_sc_fail === 100.U` looks wrong —
        // presumably `debug_sc_fail_cnt === 100.U` was intended; verify before changing
        XSWarn(s3_sc_fail === 100.U, p"L1DCache failed too many SCs in a row 0x${Hexadecimal(debug_sc_fail_addr)}, check if sth went wrong\n")
      }
    }
  }
  // assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")

  // select the data-array write mask by request kind:
  // refill writes the whole line, store hit writes its banks, AMO writes one word
  val banked_amo_wmask = UIntToOH(s3_req.word_idx)
// val banked_wmask = s3_banked_store_wmask
  val banked_wmask = Mux(
    s3_req.miss,
    banked_full_wmask,
    Mux(
      s3_store_hit,
      s3_banked_store_wmask,
      Mux(
        s3_can_do_amo_write,
        banked_amo_wmask,
        banked_none_wmask
      )
    )
  )
  val update_data = s3_req.miss || s3_store_hit || s3_can_do_amo_write
  assert(!(banked_wmask.orR && !update_data))

  // generate write data
  // AMO hits
  // s3_s_amoalu tracks whether the AMO ALU result has been latched for the
  // request currently in s3 (cleared again on s3_fire)
  val s3_s_amoalu = RegInit(false.B)
  val do_amoalu = amo_wait_amoalu && s3_valid && !s3_s_amoalu
  val amoalu = Module(new AMOALU(wordBits))
  amoalu.io.mask := s3_req.amo_mask
  amoalu.io.cmd := s3_req.cmd
  amoalu.io.lhs := s3_data_word
  amoalu.io.rhs := s3_req.amo_data

  // merge amo write data
// val amo_bitmask = FillInterleaved(8, s3_req.amo_mask)
  val s3_amo_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  val s3_sc_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  for (i <- 0 until DCacheBanks) {
    val old_data = s3_store_data_merged(i)
    val new_data = amoalu.io.out
    val wmask = Mux(
      s3_req.word_idx === i.U,
      ~0.U(wordBytes.W),
      0.U(wordBytes.W)
    )
    s3_amo_data_merged(i) := mergePutData(old_data, new_data, wmask)
// s3_sc_data_merged(i) := amo_bitmask & s3_req.amo_data | ~amo_bitmask & old_data
    // SC writes its data only into the target word, and only if it did not fail
    s3_sc_data_merged(i) := mergePutData(old_data, s3_req.amo_data,
      Mux(s3_req.word_idx === i.U && !s3_sc_fail, s3_req.amo_mask, 0.U(wordBytes.W))
    )
  }
  val s3_amo_data_merged_reg = RegEnable(s3_amo_data_merged, do_amoalu)
  when(do_amoalu){
    s3_s_amoalu := true.B
  }

  // writeback decision: a miss that evicts a valid victim, any probe, or a replace
  val miss_wb = s3_req.miss && s3_need_replacement && s3_coh.state =/= ClientStates.Nothing
  val probe_wb = s3_req.probe
  val replace_wb = s3_req.replace
  val need_wb = miss_wb || probe_wb || replace_wb

  val (_, miss_shrink_param, _) = s3_coh.onCacheControl(M_FLUSH)
  val writeback_param = Mux(probe_wb, probe_shrink_param, miss_shrink_param)
  val writeback_data = if (dcacheParameters.alwaysReleaseData) {
    s3_tag_match && s3_req.probe && s3_req.probe_need_data ||
      s3_coh === ClientStates.Dirty || (miss_wb || replace_wb) && s3_coh.state =/= ClientStates.Nothing
  } else {
    s3_tag_match && s3_req.probe && s3_req.probe_need_data || s3_coh === ClientStates.Dirty
  }

  // s3 completion conditions: each request kind can leave only when every
  // resource it needs this cycle (meta/data/tag write ports, wb queue, AMO ALU
  // result) is available or not needed
  val s3_probe_can_go = s3_req.probe && io.wb.ready && (io.meta_write.ready || !probe_update_meta)
  val s3_store_can_go = s3_req.isStore && !s3_req.probe && (io.meta_write.ready || !store_update_meta) && (io.data_write.ready || !update_data)
  val s3_amo_can_go = s3_amo_hit && (io.meta_write.ready || !amo_update_meta) && (io.data_write.ready || !update_data) && (s3_s_amoalu || !amo_wait_amoalu)
  val s3_miss_can_go = s3_req.miss &&
    (io.meta_write.ready || !amo_update_meta) &&
    (io.data_write.ready || !update_data) &&
    (s3_s_amoalu || !amo_wait_amoalu) &&
    io.tag_write.ready &&
    io.wb.ready
  val s3_replace_nothing = s3_req.replace && s3_coh.state === ClientStates.Nothing
  val s3_replace_can_go = s3_req.replace && (s3_replace_nothing || io.wb.ready)
  val s3_can_go = s3_probe_can_go || s3_store_can_go || s3_amo_can_go || s3_miss_can_go || s3_replace_can_go
  val s3_update_data_cango = s3_store_can_go || s3_amo_can_go || s3_miss_can_go // used to speed up data_write gen
  val s3_fire = s3_valid && s3_can_go
  when (s2_fire_to_s3) {
    s3_valid := true.B
  }.elsewhen (s3_fire) {
    s3_valid := false.B
  }
  s3_ready := !s3_valid || s3_can_go
  s3_s0_set_conflict := s3_valid && s3_idx === s0_idx
  s3_s0_set_conflict_store := s3_valid && s3_idx === store_idx
  assert(RegNext(!s3_valid || !(s3_req.isStore && !s3_req.probe) || s3_hit)) // miss store should never come to s3

  when(s3_fire) {
    s3_s_amoalu := false.B
  }

  req.ready := s0_can_go

  // s0 array reads: replace reqs read only their target way's meta and skip
  // the tag array entirely (they already carry the way and tag)
  io.meta_read.valid := req.valid && s1_ready && !set_conflict
  io.meta_read.bits.idx := get_idx(s0_req.vaddr)
  io.meta_read.bits.way_en := Mux(s0_req.replace, s0_req.replace_way_en, ~0.U(nWays.W))

  io.tag_read.valid := req.valid && s1_ready && !set_conflict && !s0_req.replace
  io.tag_read.bits.idx := get_idx(s0_req.vaddr)
  io.tag_read.bits.way_en := ~0.U(nWays.W)

  io.data_read.valid := s1_valid && s1_need_data && s2_ready
  io.data_read.bits.rmask := s1_banked_rmask
  io.data_read.bits.way_en := s1_way_en
  io.data_read.bits.addr := s1_req.vaddr

  // s2 -> miss queue: store/AMO misses allocate a miss entry here
  io.miss_req.valid := s2_valid && s2_can_go_to_mq
  val miss_req = io.miss_req.bits
  miss_req := DontCare
  miss_req.source := s2_req.source
  miss_req.cmd := s2_req.cmd
  miss_req.addr := s2_req.addr
  miss_req.vaddr := s2_req.vaddr
  miss_req.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_repl_way_en)
  miss_req.store_data := s2_req.store_data
  miss_req.store_mask := s2_req.store_mask
  miss_req.word_idx := s2_req.word_idx
  miss_req.amo_data := s2_req.amo_data
  miss_req.amo_mask := s2_req.amo_mask
  miss_req.req_coh := s2_hit_coh
  miss_req.replace_coh := s2_repl_coh
  miss_req.replace_tag := s2_repl_tag
  miss_req.id := s2_req.id
  miss_req.cancel := false.B

  // store miss that could not enter the miss queue: tell sbuffer to replay
  io.store_replay_resp.valid := s2_valid && s2_can_go_to_mq && replay && s2_req.isStore
  io.store_replay_resp.bits.data := DontCare
  io.store_replay_resp.bits.miss := true.B
  io.store_replay_resp.bits.replay := true.B
  io.store_replay_resp.bits.id := s2_req.id

  io.store_hit_resp.valid := s3_valid && s3_store_can_go
  io.store_hit_resp.bits.data := DontCare
  io.store_hit_resp.bits.miss := false.B
  io.store_hit_resp.bits.replay := false.B
  io.store_hit_resp.bits.id := s3_req.id

  // forward the just-written line so an in-flight release sees up-to-date data
  io.release_update.valid := s3_valid && (s3_store_can_go || s3_amo_can_go) && s3_hit && update_data
  io.release_update.bits.addr := s3_req.addr
  io.release_update.bits.mask := Mux(s3_store_hit, s3_banked_store_wmask, banked_amo_wmask)
  io.release_update.bits.data := Mux(
    amo_wait_amoalu,
    s3_amo_data_merged_reg,
    Mux(
      s3_sc,
      s3_sc_data_merged,
      s3_store_data_merged
    )
  ).asUInt

  val atomic_hit_resp = Wire(new AtomicsResp)
  // SC returns 0 on success / 1 on failure; other AMOs return the old word
  atomic_hit_resp.data := Mux(s3_sc, s3_sc_resp, s3_data_word)
  atomic_hit_resp.miss := false.B
  atomic_hit_resp.miss_id := s3_req.miss_id
  atomic_hit_resp.error := s3_error
  atomic_hit_resp.replay := false.B
  atomic_hit_resp.ack_miss_queue := s3_req.miss
  // NOTE(review): id is driven by a Bool (lrsc_valid), not a request id — confirm
  // this is intentional (it appears to piggyback the reservation state on `id`)
  atomic_hit_resp.id := lrsc_valid
  val atomic_replay_resp = Wire(new AtomicsResp)
  atomic_replay_resp.data := DontCare
  atomic_replay_resp.miss := true.B
  atomic_replay_resp.miss_id := DontCare
  atomic_replay_resp.error := false.B
  atomic_replay_resp.replay := true.B
  atomic_replay_resp.ack_miss_queue := false.B
  atomic_replay_resp.id := DontCare
  val atomic_replay_resp_valid = s2_valid && s2_can_go_to_mq && replay && s2_req.isAMO
  val atomic_hit_resp_valid = s3_valid && (s3_amo_can_go || s3_miss_can_go && s3_req.isAMO)
  io.atomic_resp.valid := atomic_replay_resp_valid || atomic_hit_resp_valid
  io.atomic_resp.bits := Mux(atomic_replay_resp_valid, atomic_replay_resp, atomic_hit_resp)

  io.replace_resp.valid := s3_fire && s3_req.replace
  io.replace_resp.bits := s3_req.miss_id

  // s3 array writes
  io.meta_write.valid := s3_fire && update_meta
  io.meta_write.bits.idx := s3_idx
  io.meta_write.bits.way_en := s3_way_en
  io.meta_write.bits.meta.coh := new_coh

  io.error_flag_write.valid := s3_fire && update_meta && s3_l2_error
  io.error_flag_write.bits.idx := s3_idx
  io.error_flag_write.bits.way_en := s3_way_en
  io.error_flag_write.bits.error := s3_l2_error

  io.tag_write.valid := s3_fire && s3_req.miss
  io.tag_write.bits.idx := s3_idx
  io.tag_write.bits.way_en := s3_way_en
  io.tag_write.bits.tag := get_tag(s3_req.addr)

  io.data_write.valid := s3_valid && s3_update_data_cango && update_data
  io.data_write.bits.way_en := s3_way_en
  io.data_write.bits.addr := s3_req.vaddr
  io.data_write.bits.wmask := banked_wmask
  io.data_write.bits.data := Mux(
    amo_wait_amoalu,
    s3_amo_data_merged_reg,
    Mux(
      s3_sc,
      s3_sc_data_merged,
      s3_store_data_merged
    )
  )
  // replace requests must never touch the arrays from here
  assert(RegNext(!io.meta_write.valid || !s3_req.replace))
  assert(RegNext(!io.tag_write.valid || !s3_req.replace))
  assert(RegNext(!io.data_write.valid || !s3_req.replace))

  // writeback request: fires together with the same per-kind resource checks
  // used by the s3 can_go signals above
  io.wb.valid := s3_valid && (
    // replace
    s3_req.replace && !s3_replace_nothing ||
    // probe can go to wbq
    s3_req.probe && (io.meta_write.ready || !probe_update_meta) ||
    // amo miss can go to wbq
    s3_req.miss &&
      (io.meta_write.ready || !amo_update_meta) &&
      (io.data_write.ready || !update_data) &&
      (s3_s_amoalu || !amo_wait_amoalu) &&
      io.tag_write.ready
  ) && need_wb
  io.wb.bits.addr := get_block_addr(Cat(s3_tag, get_untag(s3_req.vaddr)))
  io.wb.bits.param := writeback_param
  io.wb.bits.voluntary := s3_req.miss || s3_req.replace
  io.wb.bits.hasData := writeback_data
  io.wb.bits.dirty := s3_coh === ClientStates.Dirty
  io.wb.bits.data := s3_data.asUInt()
  io.wb.bits.delay_release := s3_req.replace
  io.wb.bits.miss_id := s3_req.miss_id

  // notify the replacer of a store/AMO access (registered to line up with s2)
  io.replace_access.valid := RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe)
  io.replace_access.bits.set := s2_idx
  io.replace_access.bits.way := RegNext(OHToUInt(s1_way_en))

  io.replace_way.set.valid := RegNext(s0_fire)
  io.replace_way.set.bits := s1_idx

  // TODO: consider block policy of a finer granularity
  io.status.s0_set.valid := req.valid
  io.status.s0_set.bits := get_idx(s0_req.vaddr)
  io.status.s1.valid := s1_valid
  io.status.s1.bits.set := s1_idx
  io.status.s1.bits.way_en := s1_way_en
  io.status.s2.valid := s2_valid && !s2_req.replace
  io.status.s2.bits.set := s2_idx
  io.status.s2.bits.way_en := s2_way_en
  io.status.s3.valid := s3_valid && !s3_req.replace
  io.status.s3.bits.set := s3_idx
  io.status.s3.bits.way_en := s3_way_en

  // ECC / L2 error report, registered off the s2 error signals
  io.error := 0.U.asTypeOf(new L1CacheErrorInfo())
  io.error.report_to_beu := RegNext((s2_tag_error || s2_data_error) && s2_fire)
  io.error.paddr := RegNext(s2_req.addr)
  io.error.source.tag := RegNext(s2_tag_error)
  io.error.source.data := RegNext(s2_data_error)
  io.error.source.l2 := RegNext(s2_flag_error || s2_l2_error)
  io.error.opType.store := RegNext(s2_req.isStore && !s2_req.probe)
  io.error.opType.probe := RegNext(s2_req.probe)
  io.error.opType.release := RegNext(s2_req.replace)
  io.error.opType.atom := RegNext(s2_req.isAMO && !s2_req.probe)
  io.error.valid := RegNext(s2_error && s2_fire)

  val perfEvents = Seq(
    ("dcache_mp_req          ", s0_fire                                                      ),
    ("dcache_mp_total_penalty", PopCount(VecInit(Seq(s0_fire, s1_valid, s2_valid, s3_valid))))
  )
  generatePerfEvent()
}