/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache._
import xiangshan.mem._
import xiangshan.backend.Bundles.DynInst
import difftest._
import freechips.rocketchip.util._
import xiangshan.backend.fu.FuType._

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

trait HasSbufferConst extends HasXSParameter {
  val EvictCycles = 1 << 20
  val SbufferReplayDelayCycles = 16
  require(isPow2(EvictCycles))
  val EvictCountBits = log2Up(EvictCycles+1)
  val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1

  // dcache write hit resp has 2 sources
  // refill pipe resp and main pipe resp (fixed: only main pipe resp now)
  // val NumDcacheWriteResp = 2 // hardcoded
  val NumDcacheWriteResp = 1 // hardcoded

  val SbufferIndexWidth: Int = log2Up(StoreBufferSize)
  // paddr = ptag + offset
  val CacheLineBytes: Int = CacheLineSize / 8
  val CacheLineWords: Int = CacheLineBytes / DataBytes
  val OffsetWidth: Int = log2Up(CacheLineBytes)
  val WordsWidth: Int = log2Up(CacheLineWords)
  val PTagWidth: Int = PAddrBits - OffsetWidth
  val VTagWidth: Int = VAddrBits - OffsetWidth
  val WordOffsetWidth: Int = PAddrBits - WordsWidth

  val CacheLineVWords: Int = CacheLineBytes / VDataBytes
  val VWordsWidth: Int = log2Up(CacheLineVWords)
  val VWordWidth: Int = log2Up(VDataBytes)
  val VWordOffsetWidth: Int = PAddrBits - VWordWidth
}

class SbufferEntryState (implicit p: Parameters) extends SbufferBundle {
  val state_valid    = Bool() // this entry is active
  val state_inflight = Bool() // sbuffer is trying to write this entry to dcache
  val w_timeout = Bool() // with timeout resp, waiting for resend store pipeline req timeout
  val w_sameblock_inflight = Bool() // same cache block dcache req is inflight

  def isInvalid(): Bool = !state_valid
  def isValid(): Bool = state_valid
  def isActive(): Bool = state_valid && !state_inflight
  def isInflight(): Bool = state_inflight
  def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight
}

class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst

class DataWriteReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
  // 2 cycle update
  val mask = UInt((VLEN/8).W)
  val data = UInt(VLEN.W)
  val vwordOffset = UInt(VWordOffsetWidth.W)
  val wline = Bool() // write full cacheline
}
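// A rough sketch of how a DataWriteReq is consumed (see SbufferData below):
// wvec one-hot selects a sbuffer line; in s1 the req latches data / mask /
// vwordOffset for that line, and in s2 the bytes of the selected vword whose
// mask bit is set are written (or the whole line, when wline is set),
// e.g. wvec = b0100 with vwordOffset = 1 updates vword 1 of line 2 only.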
class MaskFlushReq(implicit p: Parameters) extends SbufferBundle {
  // universal writemask
  val wvec = UInt(StoreBufferSize.W)
}

class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst {
  val io = IO(new Bundle(){
    // update data and mask when alloc or merge
    val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq)))
    // clean mask when deq
    val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq)))
    val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
    val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
  })

  val data = Reg(Vec(StoreBufferSize, Vec(CacheLineVWords, Vec(VDataBytes, UInt(8.W)))))
  // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val mask = RegInit(
    VecInit(Seq.fill(StoreBufferSize)(
      VecInit(Seq.fill(CacheLineVWords)(
        VecInit(Seq.fill(VDataBytes)(false.B))
      ))
    ))
  )

  // 2 cycle line mask clean
  for(line <- 0 until StoreBufferSize){
    val line_mask_clean_flag = GatedValidRegNext(
      io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _)
    )
    line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line)
    when(line_mask_clean_flag){
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          mask(line)(word)(byte) := false.B
        }
      }
    }
  }

  // 2 cycle data / mask update
  for(i <- 0 until EnsbufferWidth) {
    val req = io.writeReq(i)
    for(line <- 0 until StoreBufferSize){
      val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line)
      val sbuffer_in_s2_line_wen = GatedValidRegNext(sbuffer_in_s1_line_wen)
      val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen)
      val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen)
      val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen)
      val line_write_buffer_offset = RegEnable(req.bits.vwordOffset(VWordsWidth-1, 0), sbuffer_in_s1_line_wen)
      sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line)
      sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line)
      line_write_buffer_data.suggestName("line_write_buffer_data_"+line)
      line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line)
      line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line)
      line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line)
      for(word <- 0 until CacheLineVWords){
        for(byte <- 0 until VDataBytes){
          val write_byte = sbuffer_in_s2_line_wen && (
            line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) ||
            line_write_buffer_wline
          )
          when(write_byte){
            data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8)
            mask(line)(word)(byte) := true.B
          }
        }
      }
    }
  }
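  // Worked example of the s2 byte-write condition above (assuming
  // VDataBytes = 16): with latched offset = 2 and mask = 0x00f0, only bytes
  // 4..7 of vword 2 of the selected line are written; when wline is set,
  // every byte of every vword in the line is written regardless of offset.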
  // 1 cycle line mask clean
  // for(i <- 0 until EnsbufferWidth) {
  //   val req = io.writeReq(i)
  //   when(req.valid){
  //     for(line <- 0 until StoreBufferSize){
  //       when(
  //         req.bits.wvec(line) &&
  //         req.bits.cleanMask
  //       ){
  //         for(word <- 0 until CacheLineWords){
  //           for(byte <- 0 until DataBytes){
  //             mask(line)(word)(byte) := false.B
  //             val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && (
  //               req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) ||
  //               req.bits.wline
  //             ))
  //             assert(!debug_last_cycle_write_byte)
  //           }
  //         }
  //       }
  //     }
  //   }
  // }

  io.dataOut := data
  io.maskOut := mask
}

class Sbuffer(implicit p: Parameters)
  extends DCacheModule
  with HasSbufferConst
  with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddrAndPfFlag))) // TODO: store logic only supports Width == 2 now
    val vecDifftestInfo = Vec(EnsbufferWidth, Flipped(Decoupled(new DynInst)))
    val dcache = Flipped(new DCacheToSbufferIO)
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val sqempty = Input(Bool())
    val sbempty = Output(Bool())
    val flush = Flipped(new SbufferFlushBundle)
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
    val store_prefetch = Vec(StorePipelineWidth, DecoupledIO(new StorePrefetchReq)) // to dcache
    val memSetPattenDetected = Input(Bool())
    val force_write = Input(Bool())
  })

  val dataModule = Module(new SbufferData)
  dataModule.io.writeReq <> DontCare
  val prefetcher = Module(new StorePfWrapper())
  val writeReq = dataModule.io.writeReq

  val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W)))
  val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W)))
  val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool()))))
  val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W)))
  val data = dataModule.io.dataOut
  val mask = dataModule.io.maskOut
  val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState))))
  val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W))))
  val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W))))

  val sbuffer_out_s0_fire = Wire(Bool())

  /*
       idle --[flush]    --> drain   --[buf empty]  --> idle
            --[buf full] --> replace --[dcache resp]--> idle
  */
  // x_drain_all: drain store queue and sbuffer
  // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write
  val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4)
  def needDrain(state: UInt): Bool =
    state(1)
  val sbuffer_state = RegInit(x_idle)

  // ---------------------- Store Enq Sbuffer ---------------------

  def getPTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - PTagWidth)

  def getVTag(va: UInt): UInt =
    va(VAddrBits - 1, VAddrBits - VTagWidth)

  def getWord(pa: UInt): UInt =
    pa(PAddrBits-1, 3)

  def getVWord(pa: UInt): UInt =
    pa(PAddrBits-1, 4)

  def getWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 3)

  def getVWordOffset(pa: UInt): UInt =
    pa(OffsetWidth-1, 4)

  def getAddr(ptag: UInt): UInt =
    Cat(ptag, 0.U((PAddrBits - PTagWidth).W))

  def getByteOffset(offset: UInt): UInt =
    Cat(offset(OffsetWidth - 1, 3), 0.U(3.W))

  def isOneOf(key: UInt, seq: Seq[UInt]): Bool =
    if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR

  def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f

  // sbuffer entry count

  val plru = new ValidPseudoLRU(StoreBufferSize)
  val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W))))

  val candidateVec = VecInit(stateVec.map(s => s.isDcacheReqCandidate()))
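  // plru.way() picks the least-recently-used way among candidates only
  // (candidateVec is reversed, presumably to match the tree's bit order),
  // so inflight or same-block-blocked lines are never chosen as victims.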
  val replaceAlgoIdx = plru.way(candidateVec.reverse)._2
  val replaceAlgoNotDcacheCandidate = !stateVec(replaceAlgoIdx).isDcacheReqCandidate()

  assert(!(candidateVec.asUInt.orR && replaceAlgoNotDcacheCandidate), "we have a way to select, but the replace algo selects an invalid way")

  val replaceIdx = replaceAlgoIdx
  plru.access(accessIdx)

  //-------------------------cohCount-----------------------------
  // insert and merge: cohCount=0
  // every cycle cohCount+=1
  // if cohCount(EvictCountBits-1)==1, evict
  val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive()))
  val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask)
  val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask)
  val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout))
  val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask)
  val missqReplayHasTimeOut = GatedValidRegNext(missqReplayHasTimeOutGen) && !GatedValidRegNext(sbuffer_out_s0_fire)
  val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen)

  //-------------------------sbuffer enqueue-----------------------------

  // Now sbuffer enq logic is divided into 3 stages:

  // sbuffer_in_s0:
  // * read data and meta from store queue
  // * store them in 2 entry fifo queue

  // sbuffer_in_s1:
  // * read data and meta from fifo queue
  // * update sbuffer meta (vtag, ptag, flag)
  // * prevent that line from being sent to dcache (add a block condition)
  // * prepare cacheline level write enable signal, RegNext() data and mask

  // sbuffer_in_s2:
  // * use cacheline level buffer to update sbuffer data and mask
  // * remove dcache write block (if there is)

  val activeMask = VecInit(stateVec.map(s => s.isActive()))
  val validMask  = VecInit(stateVec.map(s => s.isValid()))
  val drainIdx = PriorityEncoder(activeMask)

  val inflightMask = VecInit(stateVec.map(s => s.isInflight()))

  val inptags = io.in.map(in => getPTag(in.bits.addr))
  val invtags = io.in.map(in => getVTag(in.bits.vaddr))
  val sameTag = inptags(0) === inptags(1) && io.in(0).valid && io.in(1).valid && io.in(0).bits.vecValid && io.in(1).bits.vecValid
  val firstWord = getVWord(io.in(0).bits.addr)
  val secondWord = getVWord(io.in(1).bits.addr)
  // merge condition
  val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool())))
  val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing
  val canMerge = mergeMask.map(ParallelOR(_))
  val mergeVec = mergeMask.map(_.asUInt)

  for(i <- 0 until EnsbufferWidth){
    mergeMask(i) := widthMap(j =>
      inptags(i) === ptag(j) && activeMask(j)
    )
    assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire && io.in(i).bits.vecValid))
  }

  // insert condition
  // firstInsert: the first invalid entry
  // if the first entry can merge, or the second entry has the same ptag as the first entry,
  // secondInsert equals the first invalid entry; otherwise, the second invalid entry
  val invalidMask = VecInit(stateVec.map(s => s.isInvalid()))
  val evenInvalidMask = GetEvenBits(invalidMask.asUInt)
  val oddInvalidMask = GetOddBits(invalidMask.asUInt)
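  // Entry i of evenInvalidMask maps to sbuffer entry 2i and entry i of
  // oddInvalidMask to entry 2i+1, so the two enq ports can pick free lines
  // independently. getFirstOneOH below keeps only the lowest set bit,
  // e.g. getFirstOneOH(b0110) = b0010.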
  def getFirstOneOH(input: UInt): UInt = {
    assert(input.getWidth > 1)
    val output = WireInit(VecInit(input.asBools))
    (1 until input.getWidth).map(i => {
      output(i) := !input(i - 1, 0).orR && input(i)
    })
    output.asUInt
  }

  val evenRawInsertVec = getFirstOneOH(evenInvalidMask)
  val oddRawInsertVec = getFirstOneOH(oddInvalidMask)
  val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask)
  val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask)
  val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only
  val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only
  val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec)
  val oddInsertVec = GetOddBits.reverse(oddRawInsertVec)

  val enbufferSelReg = RegInit(false.B)
  when(io.in(0).valid) {
    enbufferSelReg := ~enbufferSelReg
  }

  val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only
  val secondInsertIdx = Mux(sameTag,
    firstInsertIdx,
    Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx)
  ) // slow to generate, for debug only
  val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec)
  val secondInsertVec = Mux(sameTag,
    firstInsertVec,
    Mux(~enbufferSelReg, evenInsertVec, oddInsertVec)
  ) // slow to generate, for debug only
  val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert)
  val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag,
    firstCanInsert,
    Mux(~enbufferSelReg, evenCanInsert, oddCanInsert)
  ) && (EnsbufferWidth >= 1).B
  val forward_need_uarch_drain = WireInit(false.B)
  val merge_need_uarch_drain = WireInit(false.B)
  val do_uarch_drain = GatedValidRegNext(forward_need_uarch_drain) || GatedValidRegNext(GatedValidRegNext(merge_need_uarch_drain))
  XSPerfAccumulate("do_uarch_drain", do_uarch_drain)

  io.in(0).ready := firstCanInsert
  io.in(1).ready := secondCanInsert && io.in(0).ready

  for (i <- 0 until EnsbufferWidth) {
    // train
    if (EnableStorePrefetchSPB) {
      prefetcher.io.sbuffer_enq(i).valid := io.in(i).fire && io.in(i).bits.vecValid
      prefetcher.io.sbuffer_enq(i).bits := DontCare
      prefetcher.io.sbuffer_enq(i).bits.vaddr := io.in(i).bits.vaddr
    } else {
      prefetcher.io.sbuffer_enq(i).valid := false.B
      prefetcher.io.sbuffer_enq(i).bits := DontCare
    }

    // prefetch req
    if (EnableStorePrefetchAtCommit) {
      if (EnableAtCommitMissTrigger) {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid && io.in(i).bits.prefetch)
      } else {
        io.store_prefetch(i).valid := prefetcher.io.prefetch_req(i).valid || (io.in(i).fire && io.in(i).bits.vecValid)
      }
      io.store_prefetch(i).bits.paddr := DontCare
      io.store_prefetch(i).bits.vaddr := Mux(prefetcher.io.prefetch_req(i).valid, prefetcher.io.prefetch_req(i).bits.vaddr, io.in(i).bits.vaddr)
      prefetcher.io.prefetch_req(i).ready := io.store_prefetch(i).ready
    } else {
      io.store_prefetch(i) <> prefetcher.io.prefetch_req(i)
    }
    io.store_prefetch zip prefetcher.io.prefetch_req drop 2 foreach (x => x._1 <> x._2)
  }
  prefetcher.io.memSetPattenDetected := io.memSetPattenDetected
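  // Enq summary: an incoming store either merges into an active line whose
  // ptag matches (a mergeMask hit) or allocates a fresh line chosen from the
  // even / odd invalid masks; both paths reset the line's cohCount, see
  // wordReqToBufLine / mergeWordReq below.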
  def wordReqToBufLine( // allocate a new line in sbuffer
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    insertIdx: UInt,
    insertVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(insertIdx) === insertVec)
    val sameBlockInflightMask = genSameBlockInflightMask(reqptag)
    (0 until StoreBufferSize).map(entryIdx => {
      when(insertVec(entryIdx)){
        stateVec(entryIdx).state_valid := true.B
        stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated
        when(sameBlockInflightMask.orR){
          waitInflightMask(entryIdx) := sameBlockInflightMask
        }
        cohCount(entryIdx) := 0.U
        // missqReplayCount(insertIdx) := 0.U
        ptag(entryIdx) := reqptag
        vtag(entryIdx) := reqvtag // update vtag if a new sbuffer line is allocated
      }
    })
  }

  def mergeWordReq( // merge write req into an existing line
    req: DCacheWordReq,
    reqptag: UInt,
    reqvtag: UInt,
    mergeIdx: UInt,
    mergeVec: UInt,
    wordOffset: UInt
  ): Unit = {
    assert(UIntToOH(mergeIdx) === mergeVec)
    (0 until StoreBufferSize).map(entryIdx => {
      when(mergeVec(entryIdx)) {
        cohCount(entryIdx) := 0.U
        // missqReplayCount(entryIdx) := 0.U
        // check if vtag is the same, if not, trigger sbuffer flush
        when(reqvtag =/= vtag(entryIdx)) {
          XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n",
            reqvtag << OffsetWidth,
            reqptag << OffsetWidth,
            vtag(entryIdx) << OffsetWidth,
            ptag(entryIdx) << OffsetWidth
          )
          merge_need_uarch_drain := true.B
        }
      }
    })
  }

  for(((in, vwordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){
    writeReq(i).valid := in.fire && in.bits.vecValid
    writeReq(i).bits.vwordOffset := vwordOffset
    writeReq(i).bits.mask := in.bits.mask
    writeReq(i).bits.data := in.bits.data
    writeReq(i).bits.wline := in.bits.wline
    val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx
    val insertVec = if(i == 0) firstInsertVec else secondInsertVec
    assert(!((PopCount(insertVec) > 1.U) && in.fire && in.bits.vecValid))
    val insertIdx = OHToUInt(insertVec)
    val accessValid = in.fire && in.bits.vecValid
    accessIdx(i).valid := RegNext(accessValid)
    accessIdx(i).bits := RegEnable(Mux(canMerge(i), mergeIdx(i), insertIdx), accessValid)
    when(accessValid){
      when(canMerge(i)){
        writeReq(i).bits.wvec := mergeVec(i)
        mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), vwordOffset)
        XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n")
      }.otherwise({
        writeReq(i).bits.wvec := insertVec
        wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, vwordOffset)
        XSDebug(p"insert req $i to line[$insertIdx]\n")
        assert(debug_insertIdx === insertIdx)
      })
    }
  }

  for(i <- 0 until StoreBufferSize){
    XSDebug(stateVec(i).isValid(),
      p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n"
    )
  }

  for((req, i) <- io.in.zipWithIndex){
    XSDebug(req.fire && req.bits.vecValid,
      p"accept req [$i]: " +
        p"addr:${Hexadecimal(req.bits.addr)} " +
        p"mask:${Binary(shiftMaskToLow(req.bits.addr,req.bits.mask))} " +
        p"data:${Hexadecimal(shiftDataToLow(req.bits.addr,req.bits.data))}\n"
    )
    XSDebug(req.valid && !req.ready,
      p"req [$i] blocked by sbuffer\n"
    )
  }

  // for now, when enq, trigger a prefetch (if EnableAtCommitMissTrigger)
  require(EnsbufferWidth <= StorePipelineWidth)

  // ---------------------- Send Dcache Req ---------------------

  val sbuffer_empty = Cat(invalidMask).andR
  val sq_empty = !Cat(io.in.map(_.valid)).orR
  val empty = sbuffer_empty && sq_empty
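  // Eviction trigger example (with the Constantin defaults below,
  // threshold = 7 and base = 4): eviction starts once 7 lines are active;
  // under io.force_write the bar drops to 7 - 4 = 3; a (nearly) full buffer
  // always triggers eviction.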
  val threshold = Wire(UInt(5.W)) // RegNext(io.csrCtrl.sbuffer_threshold +& 1.U)
  threshold := Constantin.createRecord(s"StoreBufferThreshold_${p(XSCoreParamsKey).HartId}", initValue = 7)
  val base = Wire(UInt(5.W))
  base := Constantin.createRecord(s"StoreBufferBase_${p(XSCoreParamsKey).HartId}", initValue = 4)
  val ActiveCount = PopCount(activeMask)
  val ValidCount = PopCount(validMask)
  val forceThreshold = Mux(io.force_write, threshold - base, threshold)
  val do_eviction = GatedValidRegNext(ActiveCount >= forceThreshold || ActiveCount === (StoreBufferSize-1).U || ValidCount === (StoreBufferSize).U, init = false.B)
  require((StoreBufferThreshold + 1) <= StoreBufferSize)

  XSDebug(p"ActiveCount[$ActiveCount]\n")

  io.sbempty := GatedValidRegNext(empty)
  io.flush.empty := GatedValidRegNext(empty && io.sqempty)
  // lru.io.flush := sbuffer_state === x_drain_all && empty
  switch(sbuffer_state){
    is(x_idle){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(do_eviction){
        sbuffer_state := x_replace
      }
    }
    is(x_drain_all){
      when(empty){
        sbuffer_state := x_idle
      }
    }
    is(x_drain_sbuffer){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(sbuffer_empty){
        sbuffer_state := x_idle
      }
    }
    is(x_replace){
      when(io.flush.valid){
        sbuffer_state := x_drain_all
      }.elsewhen(do_uarch_drain){
        sbuffer_state := x_drain_sbuffer
      }.elsewhen(!do_eviction){
        sbuffer_state := x_idle
      }
    }
  }
  XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n")

  def noSameBlockInflight(idx: UInt): Bool = {
    // stateVec(idx) itself must not be s_inflight
    !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR
  }

  def genSameBlockInflightMask(ptag_in: UInt): UInt = {
    val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care
    assert(!(PopCount(mask) > 1.U))
    mask
  }

  def haveSameBlockInflight(ptag_in: UInt): Bool = {
    genSameBlockInflightMask(ptag_in).orR
  }

  // ---------------------------------------------------------------------------
  // sbuffer to dcache pipeline
  // ---------------------------------------------------------------------------

  // Now sbuffer deq logic is divided into 2 stages:

  // sbuffer_out_s0:
  // * read data and meta from sbuffer
  // * RegNext() them
  // * set line state to inflight

  // sbuffer_out_s1:
  // * send write req to dcache

  // sbuffer_out_extra:
  // * receive write result from dcache
  // * update line state

  val sbuffer_out_s1_ready = Wire(Bool())

  // ---------------------------------------------------------------------------
  // sbuffer_out_s0
  // ---------------------------------------------------------------------------

  val need_drain = needDrain(sbuffer_state)
  val need_replace = do_eviction || (sbuffer_state === x_replace)
  val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut,
    missqReplayTimeOutIdx,
    Mux(need_drain,
      drainIdx,
      Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx)
    )
  )
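  // Victim priority, as encoded by the Mux chain above:
  // 1. a missq replay timeout entry (resend after SbufferReplayDelayCycles)
  // 2. drainIdx, when the FSM requests a drain
  // 3. a cohCount timeout entry (active for EvictCycles without being merged)
  // 4. the PLRU replaceIdx otherwise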
  // If there is an inflight dcache req with the same ptag as sbuffer_out_s0_evictionIdx's ptag,
  // the current eviction should be blocked.
  val sbuffer_out_s0_valid = missqReplayHasTimeOut ||
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    (need_drain || cohHasTimeOut || need_replace)
  assert(!(
    stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() &&
    !noSameBlockInflight(sbuffer_out_s0_evictionIdx)
  ))
  val sbuffer_out_s0_cango = sbuffer_out_s1_ready
  sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango

  // ---------------------------------------------------------------------------
  // sbuffer_out_s1
  // ---------------------------------------------------------------------------

  // TODO: use EnsbufferWidth
  val shouldWaitWriteFinish = GatedValidRegNext(VecInit((0 until EnsbufferWidth).map{i =>
    (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR &&
    writeReq(i).valid
  }).asUInt.orR)
  // block dcache write if read / write hazard
  val blockDcacheWrite = shouldWaitWriteFinish

  val sbuffer_out_s1_valid = RegInit(false.B)
  sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid
  val sbuffer_out_s1_fire = io.dcache.req.fire

  // when sbuffer_out_s1_fire, send the dcache req stored in the pipeline reg to dcache
  when(sbuffer_out_s1_fire){
    sbuffer_out_s1_valid := false.B
  }
  // when sbuffer_out_s0_fire, read dcache req data and store it in a pipeline reg
  when(sbuffer_out_s0_cango){
    sbuffer_out_s1_valid := sbuffer_out_s0_valid
  }
  when(sbuffer_out_s0_fire){
    stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B
    stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B
    // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B
    XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n")
  }

  XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n")
  XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " +
    p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n")
  XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n")
  // Note: if other dcache reqs in the same block are inflight,
  // the lru update may not be accurate
  accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || (
    need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx))
  accessIdx(EnsbufferWidth).bits := replaceIdx
  val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)
  val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), sbuffer_out_s0_fire)

  io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite
  io.dcache.req.bits := DontCare
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XWR
  io.dcache.req.bits.addr := getAddr(sbuffer_out_s1_evictionPTag)
  io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag)
  io.dcache.req.bits.data := data(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.mask := mask(sbuffer_out_s1_evictionIdx).asUInt
  io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx

  when (sbuffer_out_s1_fire) {
    assert(!(io.dcache.req.bits.vaddr === 0.U))
    assert(!(io.dcache.req.bits.addr === 0.U))
  }
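  // Read / write hazard recap: if an enq write in s1 / s2 targets the line
  // chosen for eviction, shouldWaitWriteFinish blocks io.dcache.req for a
  // cycle, so dcache never samples a partially updated line.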
  XSDebug(sbuffer_out_s1_fire,
    p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n"
  )

  // update sbuffer status according to dcache resp source

  def id_to_sbuffer_id(id: UInt): UInt = {
    require(id.getWidth >= log2Up(StoreBufferSize))
    id(log2Up(StoreBufferSize)-1, 0)
  }

  // hit resp
  io.dcache.hit_resps.map(resp => {
    val dcache_resp_id = resp.bits.id
    when (resp.fire) {
      stateVec(dcache_resp_id).state_inflight := false.B
      stateVec(dcache_resp_id).state_valid := false.B
      assert(!resp.bits.replay)
      assert(!resp.bits.miss) // no need to resp if miss, to be optimized
      assert(stateVec(dcache_resp_id).state_inflight === true.B)
    }

    // Updating the w_sameblock_inflight flag is delayed for 1 cycle
    //
    // When a new req allocates a new line in sbuffer, the sameblock_inflight check will ignore
    // current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check
    // if the same block is still inflight
    (0 until StoreBufferSize).map(i => {
      when(
        stateVec(i).w_sameblock_inflight &&
        stateVec(i).state_valid &&
        GatedValidRegNext(resp.fire) &&
        waitInflightMask(i) === UIntToOH(RegEnable(id_to_sbuffer_id(dcache_resp_id), resp.fire))
      ){
        stateVec(i).w_sameblock_inflight := false.B
      }
    })
  })

  io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => {
    maskFlush.valid := resp.fire
    maskFlush.bits.wvec := UIntToOH(resp.bits.id)
  }}

  // replay resp
  val replay_resp_id = io.dcache.replay_resp.bits.id
  when (io.dcache.replay_resp.fire) {
    missqReplayCount(replay_resp_id) := 0.U
    stateVec(replay_resp_id).w_timeout := true.B
    // waiting for timeout
    assert(io.dcache.replay_resp.bits.replay)
    assert(stateVec(replay_resp_id).state_inflight === true.B)
  }

  // TODO: reuse cohCount
  (0 until StoreBufferSize).map(i => {
    when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) {
      missqReplayCount(i) := missqReplayCount(i) + 1.U
    }
    when(activeMask(i) && !cohTimeOutMask(i)){
      cohCount(i) := cohCount(i)+1.U
    }
  })

  if (env.EnableDifftest) {
    // hit resp
    io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => {
      val difftest = DifftestModule(new DiffSbufferEvent, delay = 1)
      val dcache_resp_id = resp.bits.id
      difftest.coreid := io.hartId
      difftest.index := index.U
      difftest.valid := resp.fire
      difftest.addr := getAddr(ptag(dcache_resp_id))
      difftest.data := data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W)))
      difftest.mask := mask(dcache_resp_id).asUInt
    }}
  }

  // ---------------------- Load Data Forward ---------------------
  val mismatch = Wire(Vec(LoadPipelineWidth, Bool()))
  XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1))
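  // Forward lookup sketch: a load presents vaddr / paddr; the vtag CAM
  // answers in the same cycle (fast but speculative) while the ptag CAM
  // checks it one cycle later; on disagreement, matchInvalid is raised and
  // the sbuffer is drained instead of forwarding possibly wrong data.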
  for ((forward, i) <- io.forward.zipWithIndex) {
    val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr)))
    // ptag_matches uses paddr from dtlb, which is far from sbuffer
    val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid)))
    val tag_matches = vtag_matches
    val tag_mismatch = GatedValidRegNext(forward.valid) && VecInit(widthMap(w =>
      GatedValidRegNext(vtag_matches(w)) =/= ptag_matches(w) && GatedValidRegNext((activeMask(w) || inflightMask(w)))
    )).asUInt.orR
    mismatch(i) := tag_mismatch
    when (tag_mismatch) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        RegNext(ptag_matches.asUInt),
        RegNext(vtag_matches.asUInt),
        RegNext(forward.vaddr),
        RegNext(forward.paddr)
      )
      forward_need_uarch_drain := true.B
    }
    val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w))
    val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w))
    val line_offset_mask = UIntToOH(getVWordOffset(forward.paddr))

    val valid_tag_match_reg = valid_tag_matches.map(RegEnable(_, forward.valid))
    val inflight_tag_match_reg = inflight_tag_matches.map(RegEnable(_, forward.valid))
    val forward_mask_candidate_reg = RegEnable(
      VecInit(mask.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )
    val forward_data_candidate_reg = RegEnable(
      VecInit(data.map(entry => entry(getVWordOffset(forward.paddr)))),
      forward.valid
    )

    val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg)
    val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg)
    selectedValidMask.suggestName("selectedValidMask_"+i)
    selectedValidData.suggestName("selectedValidData_"+i)

    val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg)
    val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg)
    selectedInflightMask.suggestName("selectedInflightMask_"+i)
    selectedInflightData.suggestName("selectedInflightData_"+i)

    // currently not being used
    val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))
    val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineVWords, Vec(VDataBytes, Bool()))))

    forward.dataInvalid := false.B // data in store line merge buffer is always ready
    forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match
    for (j <- 0 until VDataBytes) {
      forward.forwardMask(j) := false.B
      forward.forwardData(j) := DontCare

      // valid entries have higher priority than inflight entries
      when(selectedInflightMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedInflightData(j)
      }
      when(selectedValidMask(j)) {
        forward.forwardMask(j) := true.B
        forward.forwardData(j) := selectedValidData(j)
      }

      forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j)
    }
    forward.addrInvalid := DontCare
  }

  for (i <- 0 until StoreBufferSize) {
    XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n",
      ptag(i) << OffsetWidth,
      vtag(i) << OffsetWidth,
      stateVec(i).isValid(),
      activeMask(i),
      inflightMask(i),
      stateVec(i).w_timeout
    )
  }

  /*
  *
  **********************************************************
  *      -------------          -------------              *
  *      | XiangShan |          |    NEMU   |              *
  *      -------------          -------------              *
  *            |                      |                    *
  *            V                      V                    *
  *          -----                  -----                  *
  *          | Q |                  | Q |                  *
  *          | U |                  | U |                  *
  *          | E |                  | E |                  *
  *          | U |                  | U |                  *
  *          | E |                  | E |                  *
  *          |   |                  |   |                  *
  *          -----                  -----                  *
  *            |                      |                    *
  *            |        --------------|                    *
  *            |>>>>>>>>|  DIFFTEST  |<<<<<<<<<|           *
  *                     --------------                     *
  **********************************************************
  */
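  // Re-split example for the difftest logic below (assuming VLEN = 128 and
  // EewLog2 returning log2 of the element byte size): a unit-stride vse32
  // gives EEB = 4 bytes per element and flow = 16 >> 2 = 4 store events,
  // each carrying one element's slice of data and mask.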
  // Initialize when difftest is not enabled.
  for (i <- 0 until EnsbufferWidth) {
    io.vecDifftestInfo(i) := DontCare
  }
  if (env.EnableDifftest) {
    val VecMemFLOWMaxNumber = 16
    val WlineMaxNumber = blockWords

    def UIntSlice(in: UInt, High: UInt, Low: UInt): UInt = {
      val maxNum = in.getWidth
      val result = Wire(Vec(maxNum, Bool()))

      for (i <- 0 until maxNum) {
        when (Low + i.U <= High) {
          result(i) := in(Low + i.U)
        }.otherwise{
          result(i) := 0.U
        }
      }

      result.asUInt
    }

    // To align with 'nemu', we need:
    // For 'unit-store' and 'whole' vector store instr, we re-split here,
    // and for the rest, we do nothing.
    for (i <- 0 until EnsbufferWidth) {
      io.vecDifftestInfo(i).ready := io.in(i).ready

      val uop = io.vecDifftestInfo(i).bits

      val isVse = isVStore(uop.fuType) && LSUOpType.isUStride(uop.fuOpType)
      val isVsm = isVStore(uop.fuType) && VstuType.isMasked(uop.fuOpType)
      val isVsr = isVStore(uop.fuType) && VstuType.isWhole(uop.fuOpType)

      val vpu = uop.vpu
      val veew = uop.vpu.veew
      val eew = EewLog2(veew)
      val EEB = (1.U << eew).asUInt // effective element bytes (only when VLEN = 128)
      val EEWBits = (EEB << 3.U).asUInt
      val nf = Mux(isVsr, 0.U, vpu.nf)

      val isSegment = nf =/= 0.U && !isVsm
      val isVSLine = (isVse || isVsm || isVsr) && !isSegment
      val isWline = io.in(i).bits.wline

      // The number of stores generated by a uop, in theory.
      // No other vector instructions need to be considered.
      val flow = Mux(
        isVSLine,
        (16.U >> eew).asUInt,
        0.U
      )

      val rawData = io.in(i).bits.data
      val rawMask = io.in(i).bits.mask
      val rawAddr = io.in(i).bits.addr

      // A common difftest interface for scalar and vector instr
      val difftestCommon = DifftestModule(new DiffStoreEvent, delay = 2, dontCare = true)
      when (isVSLine) {
        val splitMask = UIntSlice(rawMask, EEB - 1.U, 0.U)(7,0) // Byte
        val splitData = UIntSlice(rawData, EEWBits - 1.U, 0.U)(63,0) // Double word
        val storeCommit = io.in(i).fire && splitMask.orR && io.in(i).bits.vecValid
        val waddr = rawAddr
        val wmask = splitMask
        val wdata = splitData & MaskExpand(splitMask)

        difftestCommon.coreid := io.hartId
        difftestCommon.index := (i*VecMemFLOWMaxNumber).U
        difftestCommon.valid := storeCommit
        difftestCommon.addr := waddr
        difftestCommon.data := wdata
        difftestCommon.mask := wmask

      } .elsewhen (!isWline) {
        val storeCommit = io.in(i).fire
        val waddr = ZeroExt(Cat(io.in(i).bits.addr(PAddrBits - 1, 3), 0.U(3.W)), 64)
        val sbufferMask = shiftMaskToLow(io.in(i).bits.addr, io.in(i).bits.mask)
        val sbufferData = shiftDataToLow(io.in(i).bits.addr, io.in(i).bits.data)
        val wmask = sbufferMask
        val wdata = sbufferData & MaskExpand(sbufferMask)

        difftestCommon.coreid := io.hartId
        difftestCommon.index := (i*VecMemFLOWMaxNumber).U
        difftestCommon.valid := storeCommit && io.in(i).bits.vecValid
        difftestCommon.addr := waddr
        difftestCommon.data := wdata
        difftestCommon.mask := wmask

      }
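      // A wline (whole-cacheline) store is replayed to difftest as
      // WlineMaxNumber word-sized writes, one per data word of the block;
      // the assert below restricts wline to whole-zero writes for now.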
      for (index <- 0 until WlineMaxNumber) {
        val difftest = DifftestModule(new DiffStoreEvent, delay = 2, dontCare = true)

        val storeCommit = io.in(i).fire && io.in(i).bits.vecValid
        val blockAddr = get_block_addr(io.in(i).bits.addr)

        when (isWline) {
          difftest.coreid := io.hartId
          difftest.index := (i*VecMemFLOWMaxNumber + index).U
          difftest.valid := storeCommit
          difftest.addr := blockAddr + (index.U << wordOffBits)
          difftest.data := io.in(i).bits.data
          difftest.mask := ((1 << wordBytes) - 1).U

          assert(!storeCommit || (io.in(i).bits.data === 0.U), "wline only supports whole zero write now")
        }
      }

      // Only the interface used by the 'unit-store' and 'whole' vector store instr
      for (index <- 1 until VecMemFLOWMaxNumber) {
        val difftest = DifftestModule(new DiffStoreEvent, delay = 2, dontCare = true)

        // Some processing of 'mask' has already been done outside:
        // Different cases of 'vm' have been considered:
        // Any valid store will definitely not have an all-zero mask,
        // and the extra part due to unaligned access must have a mask of 0
        when (index.U < flow && isVSLine) {
          // Make NEMU-difftest happy
          val shiftIndex = EEB*index.U
          val shiftFlag = shiftIndex(2,0).orR // Double word Flag
          val shiftBytes = Mux(shiftFlag, shiftIndex(2,0), 0.U)
          val shiftBits = shiftBytes << 3.U
          val splitMask = UIntSlice(rawMask, (EEB*(index+1).U - 1.U), EEB*index.U)(7,0) // Byte
          val splitData = UIntSlice(rawData, (EEWBits*(index+1).U - 1.U), EEWBits*index.U)(63,0) // Double word
          val storeCommit = io.in(i).fire && splitMask.orR && io.in(i).bits.vecValid
          val waddr = Cat(rawAddr(PAddrBits - 1, 4), Cat(shiftIndex(3), 0.U(3.W)))
          val wmask = splitMask << shiftBytes
          val wdata = (splitData & MaskExpand(splitMask)) << shiftBits

          difftest.coreid := io.hartId
          difftest.index := (i*VecMemFLOWMaxNumber+index).U
          difftest.valid := storeCommit
          difftest.addr := waddr
          difftest.data := wdata
          difftest.mask := wmask

        }
      }
    }
  }

  val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt))
  XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1)
  XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire)).asUInt))
  XSPerfAccumulate("sbuffer_req_fire_vecinvalid", PopCount(VecInit(io.in.map(data => data.fire && !data.bits.vecValid)).asUInt))
  XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt))
  XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt))
  XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid)
  XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire)
  XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle)
  XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer)
  XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace)
  XSPerfAccumulate("evenCanInsert", evenCanInsert)
  XSPerfAccumulate("oddCanInsert", oddCanInsert)
  XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire)
  //XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire)
  XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire)
  XSPerfAccumulate("coh_timeout", cohHasTimeOut)

  // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire, io.lsu.resp.fire)
  // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10)
  // XSPerfAccumulate("store_req", io.lsu.req.fire)
  val perfEvents = Seq(
    ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt)),
    ("sbuffer_req_fire  ", PopCount(VecInit(io.in.map(_.fire)).asUInt)),
    ("sbuffer_merge     ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && canMerge(i)})).asUInt)),
    ("sbuffer_newline   ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire && !canMerge(i)})).asUInt)),
    ("dcache_req_valid  ", io.dcache.req.valid),
    ("dcache_req_fire   ", io.dcache.req.fire),
    ("sbuffer_idle      ", sbuffer_state === x_idle),
    ("sbuffer_flush     ", sbuffer_state === x_drain_sbuffer),
    ("sbuffer_replace   ", sbuffer_state === x_replace),
    ("mpipe_resp_valid  ", io.dcache.main_pipe_hit_resp.fire),
    //("refill_resp_valid ", io.dcache.refill_hit_resp.fire),
    ("replay_resp_valid ", io.dcache.replay_resp.fire),
    ("coh_timeout       ", cohHasTimeOut),
    ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U))),
    ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U))),
    ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))),
    ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U)))
  )
  generatePerfEvent()

}