xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MainPipe.scala (revision eb163ef08fc5ac1da1f32d948699bd6de053e444)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.cache
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.util._
22import freechips.rocketchip.tilelink.ClientStates._
23import freechips.rocketchip.tilelink.MemoryOpCategories._
24import freechips.rocketchip.tilelink.TLPermissions._
25import freechips.rocketchip.tilelink.{ClientMetadata, ClientStates, TLPermissions}
26import utils._
27import xiangshan.L1CacheErrorInfo
28
/** Request bundle fed into the DCache main pipeline.
  *
  * A single bundle type serves several request flavors; the `miss`, `probe`
  * and `replace` flags together with `source` determine which of the fields
  * below are meaningful for a given request.
  */
class MainPipeReq(implicit p: Parameters) extends DCacheBundle {
  // miss refill info (only AMO misses are refilled by the main pipe)
  val miss = Bool()
  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  val miss_param = UInt(TLPermissions.bdWidth.W)
  val miss_dirty = Bool()
  val miss_way_en = UInt(DCacheWays.W)

  // probe info
  val probe = Bool()
  val probe_param = UInt(TLPermissions.bdWidth.W)
  val probe_need_data = Bool()

  // common request info, used by store and AMO requests (probes ignore these)
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  // vaddr supplies the extra index bits needed when dcache size > 32KB
  val vaddr  = UInt(VAddrBits.W)
  // physical address, must be block aligned
  val addr   = UInt(PAddrBits.W)

  // full-line store payload
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // AMO operates on a single word within the line
  val word_idx = UInt(log2Up(cfg.blockBytes * 8 / DataBits).W)
  val amo_data   = UInt(DataBits.W)
  val amo_mask   = UInt((DataBits / 8).W)

  // error flag carried with the request (treated as l2_error downstream)
  val error = Bool()

  // replace info
  val replace = Bool()
  val replace_way_en = UInt(DCacheWays.W)

  val id = UInt(reqIdWidth.W)

  def isLoad: Bool = source === LOAD_SOURCE.U
  def isStore: Bool = source === STORE_SOURCE.U
  def isAMO: Bool = source === AMO_SOURCE.U

  /** Build a main-pipe store request from a store-buffer line request. */
  def convertStoreReq(store: DCacheLineReq): MainPipeReq = {
    val mp = Wire(new MainPipeReq)
    mp := DontCare
    // a plain store: not a miss refill, not a probe, not a replace
    mp.miss := false.B
    mp.miss_dirty := false.B
    mp.probe := false.B
    mp.probe_need_data := false.B
    mp.replace := false.B
    mp.error := false.B
    // copy the store payload over
    mp.source := STORE_SOURCE.U
    mp.cmd := store.cmd
    mp.addr := store.addr
    mp.vaddr := store.vaddr
    mp.store_data := store.data
    mp.store_mask := store.mask
    mp.id := store.id
    mp
  }
}
92
/** DCache main pipeline.
  *
  * A 4-stage pipeline shared by probe, replace, store and atomic (AMO)
  * requests:
  *  - s0: arbitrate one request; read meta and tag arrays
  *  - s1: compare tags, choose the way to access (hit way / refill way /
  *        victim way) and issue the data array read
  *  - s2: select and merge data; store/AMO misses are diverted to the miss
  *        queue here instead of entering s3
  *  - s3: write data/meta/tag, run the AMO ALU, maintain the LR/SC
  *        reservation set, and hand lines to the writeback queue
  *
  * A request in s0 is stalled while any of s1/s2/s3 holds a request to the
  * same set, so at most one in-flight request touches a given set.
  */
class MainPipe(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new Bundle() {
    // probe queue
    val probe_req = Flipped(DecoupledIO(new MainPipeReq))
    // store miss go to miss queue
    val miss_req = DecoupledIO(new MissReq)
    // store buffer
    val store_req = Flipped(DecoupledIO(new DCacheLineReq))
    val store_replay_resp = ValidIO(new DCacheLineResp)
    val store_hit_resp = ValidIO(new DCacheLineResp)
    val release_update = ValidIO(new ReleaseUpdate)
    // atomics
    val atomic_req = Flipped(DecoupledIO(new MainPipeReq))
    val atomic_resp = ValidIO(new AtomicsResp)
    // replace
    val replace_req = Flipped(DecoupledIO(new MainPipeReq))
    val replace_resp = ValidIO(UInt(log2Up(cfg.nMissEntries).W))
    // write-back queue
    val wb = DecoupledIO(new WritebackReq)

    val data_read_intend = Output(Bool())
    val data_read = DecoupledIO(new L1BankedDataReadLineReq)
    val data_resp = Input(Vec(DCacheBanks, new L1BankedDataReadResult()))
    val readline_error_delayed = Input(Bool())
    val data_write = DecoupledIO(new L1BankedDataWriteReq)

    val meta_read = DecoupledIO(new MetaReadReq)
    val meta_resp = Input(Vec(nWays, new Meta))
    val meta_write = DecoupledIO(new MetaWriteReq)
    val error_flag_resp = Input(Vec(nWays, Bool()))
    val error_flag_write = DecoupledIO(new ErrorWriteReq)

    val tag_read = DecoupledIO(new TagReadReq)
    val tag_resp = Input(Vec(nWays, UInt(encTagBits.W)))
    val tag_write = DecoupledIO(new TagWriteReq)
    val tag_write_intend = Output(new Bool())

    // update state vec in replacement algo
    val replace_access = ValidIO(new ReplacementAccessBundle)
    // find the way to be replaced
    val replace_way = new ReplacementWayReqIO

    val status = new Bundle() {
      val s0_set = ValidIO(UInt(idxBits.W))
      val s1, s2, s3 = ValidIO(new Bundle() {
        val set = UInt(idxBits.W)
        val way_en = UInt(nWays.W)
      })
    }

    // lrsc locked block should block probe
    val lrsc_locked_block = Output(Valid(UInt(PAddrBits.W)))
    val invalid_resv_set = Input(Bool())
    val update_resv_set = Output(Bool())
    val block_lr = Output(Bool())

    // ecc error
    val error = Output(new L1CacheErrorInfo())
  })

  // meta array is made of regs, so meta write or read should always be ready
  assert(RegNext(io.meta_read.ready))
  assert(RegNext(io.meta_write.ready))

  // set-conflict signals: a new s0 request must not enter the pipe while an
  // older in-flight request in s1/s2/s3 targets the same set
  val s1_s0_set_conflict, s2_s0_set_conlict, s3_s0_set_conflict = Wire(Bool())
  val set_conflict = s1_s0_set_conflict || s2_s0_set_conlict || s3_s0_set_conflict
  // check sbuffer store req set_conflict in parallel with req arbiter
  // it will speed up the generation of store_req.ready, which is in crit. path
  val s1_s0_set_conflict_store, s2_s0_set_conlict_store, s3_s0_set_conflict_store = Wire(Bool())
  val store_set_conflict = s1_s0_set_conflict_store || s2_s0_set_conlict_store || s3_s0_set_conflict_store
  val s1_ready, s2_ready, s3_ready = Wire(Bool())

  // convert store req to main pipe req, and select a req from store and probe
  val store_req = Wire(DecoupledIO(new MainPipeReq))
  store_req.bits := (new MainPipeReq).convertStoreReq(io.store_req.bits)
  store_req.valid := io.store_req.valid
  io.store_req.ready := store_req.ready

  // s0: read meta and tag
  val req = Wire(DecoupledIO(new MainPipeReq))
  arbiter(
    in = Seq(
      io.probe_req,
      io.replace_req,
      store_req, // Note: store_req.ready is now manually assigned for better timing
      io.atomic_req
    ),
    out = req,
    name = Some("main_pipe_req")
  )

  val store_idx = get_idx(io.store_req.bits.vaddr)
  // manually assign store_req.ready for better timing
  // now store_req set conflict check is done in parallel with req arbiter
  store_req.ready := io.meta_read.ready && io.tag_read.ready && s1_ready && !store_set_conflict &&
    !io.probe_req.valid && !io.replace_req.valid
  val s0_req = req.bits
  val s0_idx = get_idx(s0_req.vaddr)
  val s0_need_tag = io.tag_read.valid
  val s0_can_go = io.meta_read.ready && io.tag_read.ready && s1_ready && !set_conflict
  val s0_fire = req.valid && s0_can_go

  // per-bank view of the store mask: which banks are written at all, and
  // which are fully overwritten (no read-modify-write needed for those)
  val bank_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).orR)).asUInt
  val bank_full_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).andR)).asUInt
  val banks_full_overwrite = bank_full_write.andR

  // partially written banks must be read first so store data can be merged in s2
  val banked_store_rmask = bank_write & ~bank_full_write
  val banked_full_rmask = ~0.U(DCacheBanks.W)
  val banked_none_rmask = 0.U(DCacheBanks.W)

  val store_need_data = !s0_req.probe && s0_req.isStore && banked_store_rmask.orR
  val probe_need_data = s0_req.probe
  val amo_need_data = !s0_req.probe && s0_req.isAMO
  val miss_need_data = s0_req.miss
  val replace_need_data = s0_req.replace

  val banked_need_data = store_need_data || probe_need_data || amo_need_data || miss_need_data || replace_need_data

  val s0_banked_rmask = Mux(store_need_data, banked_store_rmask,
    Mux(probe_need_data || amo_need_data || miss_need_data || replace_need_data,
      banked_full_rmask,
      banked_none_rmask
    ))

  // generate wmask here and use it in stage 2
  val banked_store_wmask = bank_write
  val banked_full_wmask = ~0.U(DCacheBanks.W)
  val banked_none_wmask = 0.U(DCacheBanks.W)

  // s1: read data
  val s1_valid = RegInit(false.B)
  val s1_need_data = RegEnable(banked_need_data, s0_fire)
  val s1_req = RegEnable(s0_req, s0_fire)
  val s1_banked_rmask = RegEnable(s0_banked_rmask, s0_fire)
  val s1_banked_store_wmask = RegEnable(banked_store_wmask, s0_fire)
  val s1_need_tag = RegEnable(s0_need_tag, s0_fire)
  val s1_can_go = s2_ready && (io.data_read.ready || !s1_need_data)
  val s1_fire = s1_valid && s1_can_go
  val s1_idx = get_idx(s1_req.vaddr)
  when (s0_fire) {
    s1_valid := true.B
  }.elsewhen (s1_fire) {
    s1_valid := false.B
  }
  s1_ready := !s1_valid || s1_can_go
  s1_s0_set_conflict := s1_valid && s0_idx === s1_idx
  s1_s0_set_conflict_store := s1_valid && store_idx === s1_idx

  // Response-holding idiom: the array response is valid exactly one cycle
  // after the read fired (RegNext(s0_fire)); otherwise recirculate the held
  // value so s1 can stall without losing the response.
  val meta_resp = Wire(Vec(nWays, (new Meta).asUInt()))
  val tag_resp = Wire(Vec(nWays, UInt(tagBits.W)))
  val ecc_resp = Wire(Vec(nWays, UInt(eccTagBits.W)))
  meta_resp := Mux(RegNext(s0_fire), VecInit(io.meta_resp.map(_.asUInt)), RegNext(meta_resp))
  tag_resp := Mux(RegNext(s0_fire), VecInit(io.tag_resp.map(r => r(tagBits - 1, 0))), RegNext(tag_resp))
  ecc_resp := Mux(RegNext(s0_fire), VecInit(io.tag_resp.map(r => r(encTagBits - 1, tagBits))), RegNext(ecc_resp))
  val enc_tag_resp = Wire(io.tag_resp.cloneType)
  enc_tag_resp := Mux(RegNext(s0_fire), io.tag_resp, RegNext(enc_tag_resp))

  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
  // a way hits only if its tag matches AND its coherence state is valid
  val s1_tag_eq_way = wayMap((w: Int) => tag_resp(w) === get_tag(s1_req.addr)).asUInt
  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && Meta(meta_resp(w)).coh.isValid()).asUInt
  val s1_tag_match = s1_tag_match_way.orR

  val s1_hit_tag = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => tag_resp(w))), get_tag(s1_req.addr))
  val s1_hit_coh = ClientMetadata(Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => meta_resp(w))), 0.U))
  val s1_encTag = Mux1H(s1_tag_match_way, wayMap((w: Int) => enc_tag_resp(w)))
  val s1_flag_error = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => io.error_flag_resp(w))), false.B)
  val s1_l2_error = s1_req.error

  // replacement policy
  // victim way is sampled from the replacer one cycle after s0 fired and
  // held while s1 stalls (same holding idiom as the array responses above)
  val s1_repl_way_en = WireInit(0.U(nWays.W))
  s1_repl_way_en := Mux(RegNext(s0_fire), UIntToOH(io.replace_way.way), RegNext(s1_repl_way_en))
  val s1_repl_tag = Mux1H(s1_repl_way_en, wayMap(w => tag_resp(w)))
  val s1_repl_coh = Mux1H(s1_repl_way_en, wayMap(w => meta_resp(w))).asTypeOf(new ClientMetadata)
  val s1_miss_tag = Mux1H(s1_req.miss_way_en, wayMap(w => tag_resp(w)))
  val s1_miss_coh = Mux1H(s1_req.miss_way_en, wayMap(w => meta_resp(w))).asTypeOf(new ClientMetadata)

  val s1_repl_way_raw = WireInit(0.U(log2Up(nWays).W))
  s1_repl_way_raw := Mux(RegNext(s0_fire), io.replace_way.way, RegNext(s1_repl_way_raw))

  // way selection priority: replace requests carry their own way, then the
  // miss refill way, then the victim way when replacement is needed,
  // otherwise the hit way
  val s1_need_replacement = (s1_req.miss || s1_req.isStore && !s1_req.probe) && !s1_tag_match
  val s1_way_en = Mux(
    s1_req.replace,
    s1_req.replace_way_en,
    Mux(
      s1_req.miss,
      s1_req.miss_way_en,
      Mux(
        s1_need_replacement,
        s1_repl_way_en,
        s1_tag_match_way
      )
    )
  )
  assert(!RegNext(s1_fire && PopCount(s1_way_en) > 1.U))
  val s1_tag = Mux(
    s1_req.replace,
    get_tag(s1_req.addr),
    Mux(
      s1_req.miss,
      s1_miss_tag,
      Mux(s1_need_replacement, s1_repl_tag, s1_hit_tag)
    )
  )
  val s1_coh = Mux(
    s1_req.replace,
    Mux1H(s1_req.replace_way_en, meta_resp.map(ClientMetadata(_))),
    Mux(
      s1_req.miss,
      s1_miss_coh,
      Mux(s1_need_replacement, s1_repl_coh, s1_hit_coh)
    )
  )

  val s1_has_permission = s1_hit_coh.onAccess(s1_req.cmd)._1
  val s1_hit = s1_tag_match && s1_has_permission
  // pre-computed in s1 to shorten the s2 miss_req.valid path
  val s1_pregen_can_go_to_mq = !s1_req.replace && !s1_req.probe && !s1_req.miss && (s1_req.isStore || s1_req.isAMO) && !s1_hit

  // s2: select data, return resp if this is a store miss
  val s2_valid = RegInit(false.B)
  val s2_req = RegEnable(s1_req, s1_fire)
  val s2_tag_match = RegEnable(s1_tag_match, s1_fire)
  val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_fire)
  val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire)
  val (s2_has_permission, _, s2_new_hit_coh) = s2_hit_coh.onAccess(s2_req.cmd)

  val s2_repl_tag = RegEnable(s1_repl_tag, s1_fire)
  val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire)
  val s2_repl_way_en = RegEnable(s1_repl_way_en, s1_fire)
  val s2_need_replacement = RegEnable(s1_need_replacement, s1_fire)
  val s2_need_data = RegEnable(s1_need_data, s1_fire)
  val s2_need_tag = RegEnable(s1_need_tag, s1_fire)
  val s2_encTag = RegEnable(s1_encTag, s1_fire)
  val s2_idx = get_idx(s2_req.vaddr)
  val s2_way_en = RegEnable(s1_way_en, s1_fire)
  val s2_tag = RegEnable(s1_tag, s1_fire)
  val s2_coh = RegEnable(s1_coh, s1_fire)
  val s2_banked_store_wmask = RegEnable(s1_banked_store_wmask, s1_fire)
  val s2_flag_error = RegEnable(s1_flag_error, s1_fire)
  val s2_tag_error = dcacheParameters.tagCode.decode(s2_encTag).error && s2_need_tag
  val s2_l2_error = s2_req.error
  val s2_error = s2_flag_error || s2_tag_error || s2_l2_error // data_error not included

  val s2_may_report_data_error = s2_need_data && s2_coh.state =/= ClientStates.Nothing

  val s2_hit = s2_tag_match && s2_has_permission
  val s2_amo_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isAMO
  val s2_store_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isStore

  s2_s0_set_conlict := s2_valid && s0_idx === s2_idx
  s2_s0_set_conlict_store := s2_valid && store_idx === s2_idx

  // For a store req, it either hits and goes to s3, or miss and enter miss queue immediately
  val s2_can_go_to_s3 = (s2_req.replace || s2_req.probe || s2_req.miss || (s2_req.isStore || s2_req.isAMO) && s2_hit) && s3_ready
  val s2_can_go_to_mq = RegEnable(s1_pregen_can_go_to_mq, s1_fire)
  assert(RegNext(!(s2_valid && s2_can_go_to_s3 && s2_can_go_to_mq)))
  val s2_can_go = s2_can_go_to_s3 || s2_can_go_to_mq
  val s2_fire = s2_valid && s2_can_go
  val s2_fire_to_s3 = s2_valid && s2_can_go_to_s3
  when (s1_fire) {
    s2_valid := true.B
  }.elsewhen (s2_fire) {
    s2_valid := false.B
  }
  s2_ready := !s2_valid || s2_can_go
  val replay = !io.miss_req.ready

  // hold data array response while s2 stalls (valid 1 cycle after s1 fired)
  val data_resp = Wire(io.data_resp.cloneType)
  data_resp := Mux(RegNext(s1_fire), io.data_resp, RegNext(data_resp))
  val s2_store_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))

  // byte-granular merge: take new_data where wmask selects, old_data elsewhere
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    ((~full_wmask & old_data) | (full_wmask & new_data))
  }

  val s2_data = WireInit(VecInit((0 until DCacheBanks).map(i => {
    data_resp(i).raw_data
  })))

  for (i <- 0 until DCacheBanks) {
    val old_data = s2_data(i)
    val new_data = get_data_of_bank(i, s2_req.store_data)
    // for amo hit, we should use read out SRAM data
    // do not merge with store data
    val wmask = Mux(s2_amo_hit, 0.U(wordBytes.W), get_mask_of_bank(i, s2_req.store_mask))
    s2_store_data_merged(i) := mergePutData(old_data, new_data, wmask)
  }

  val s2_data_word = s2_store_data_merged(s2_req.word_idx)

  // s3: write data, meta and tag
  val s3_valid = RegInit(false.B)
  val s3_req = RegEnable(s2_req, s2_fire_to_s3)
  val s3_idx = get_idx(s3_req.vaddr)
  val s3_tag = RegEnable(s2_tag, s2_fire_to_s3)
  val s3_tag_match = RegEnable(s2_tag_match, s2_fire_to_s3)
  val s3_coh = RegEnable(s2_coh, s2_fire_to_s3)
  val s3_hit = RegEnable(s2_hit, s2_fire_to_s3)
  val s3_amo_hit = RegEnable(s2_amo_hit, s2_fire_to_s3)
  val s3_store_hit = RegEnable(s2_store_hit, s2_fire_to_s3)
  val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire_to_s3)
  val s3_new_hit_coh = RegEnable(s2_new_hit_coh, s2_fire_to_s3)
  val s3_way_en = RegEnable(s2_way_en, s2_fire_to_s3)
  val s3_banked_store_wmask = RegEnable(s2_banked_store_wmask, s2_fire_to_s3)
  val s3_store_data_merged = RegEnable(s2_store_data_merged, s2_fire_to_s3)
  val s3_data_word = RegEnable(s2_data_word, s2_fire_to_s3)
  val s3_data = RegEnable(s2_data, s2_fire_to_s3)
  val s3_l2_error = s3_req.error
  // data_error will be reported by data array 1 cycle after data read resp
  val s3_data_error = Wire(Bool())
  s3_data_error := Mux(RegNext(RegNext(s1_fire)), // ecc check result is generated 2 cycle after read req
    io.readline_error_delayed && RegNext(s2_may_report_data_error),
    RegNext(s3_data_error) // do not update s3_data_error if !s1_fire
  )
  // error signal for amo inst
  // s3_error = s3_flag_error || s3_tag_error || s3_l2_error || s3_data_error
  val s3_error = RegEnable(s2_error, s2_fire_to_s3) || s3_data_error
  val (probe_has_dirty_data, probe_shrink_param, probe_new_coh) = s3_coh.onProbe(s3_req.probe_param)
  val s3_need_replacement = RegEnable(s2_need_replacement, s2_fire_to_s3)

  val miss_update_meta = s3_req.miss
  val probe_update_meta = s3_req.probe && s3_tag_match && s3_coh =/= probe_new_coh
  val store_update_meta = s3_req.isStore && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
  val amo_update_meta = s3_req.isAMO && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
  val amo_wait_amoalu = s3_req.isAMO && s3_req.cmd =/= M_XLR && s3_req.cmd =/= M_XSC
  val update_meta = (miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta) && !s3_req.replace

  // next coherence state after a miss refill, given the grant's permission
  // (param) and dirty flag
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
  val miss_new_coh = ClientMetadata(missCohGen(s3_req.cmd, s3_req.miss_param, s3_req.miss_dirty))

  val new_coh = Mux(
    miss_update_meta,
    miss_new_coh,
    Mux(
      probe_update_meta,
      probe_new_coh,
      Mux(
        store_update_meta || amo_update_meta,
        s3_new_hit_coh,
        ClientMetadata.onReset
      )
    )
  )

  // LR, SC and AMO
  val debug_sc_fail_addr = RegInit(0.U)
  val debug_sc_fail_cnt  = RegInit(0.U(8.W))

  // LR/SC reservation: lrsc_count is armed by a successful LR and counts
  // down every cycle; the reservation is honored only while
  // count > LRSCBackOff (the final LRSCBackOff cycles act as a back-off
  // window during which the block is no longer locked)
  val lrsc_count = RegInit(0.U(log2Ceil(LRSCCycles).W))
  val lrsc_valid = lrsc_count > LRSCBackOff.U
  val lrsc_addr  = Reg(UInt())
  val s3_lr = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XLR
  val s3_sc = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XSC
  val s3_lrsc_addr_match = lrsc_valid && lrsc_addr === get_block_addr(s3_req.addr)
  val s3_sc_fail = s3_sc && !s3_lrsc_addr_match
  val s3_sc_resp = Mux(s3_sc_fail, 1.U, 0.U)

  val s3_can_do_amo = (s3_req.miss && !s3_req.probe && s3_req.source === AMO_SOURCE.U) || s3_amo_hit
  val s3_can_do_amo_write = s3_can_do_amo && isWrite(s3_req.cmd) && !s3_sc_fail

  when (s3_valid && (s3_lr || s3_sc)) {
    when (s3_can_do_amo && s3_lr) {
      lrsc_count := (LRSCCycles - 1).U
      lrsc_addr := get_block_addr(s3_req.addr)
    } .otherwise {
      lrsc_count := 0.U
    }
  } .elsewhen (io.invalid_resv_set) {
    // when we release this block,
    // we invalidate this reservation set
    lrsc_count := 0.U
  } .elsewhen (lrsc_count > 0.U) {
    lrsc_count := lrsc_count - 1.U
  }

  io.lrsc_locked_block.valid := lrsc_valid
  io.lrsc_locked_block.bits  := lrsc_addr
  io.block_lr := RegNext(lrsc_count > 0.U)

  // When we update the reservation set, block all probe req in the next cycle
  // It should give Probe reservation set addr compare an independent cycle,
  // which will lead to better timing
  io.update_resv_set := s3_valid && s3_lr && s3_can_do_amo

  when (s3_valid) {
    when (s3_req.addr === debug_sc_fail_addr) {
      when (s3_sc_fail) {
        debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
      } .elsewhen (s3_sc) {
        debug_sc_fail_cnt := 0.U
      }
    } .otherwise {
      when (s3_sc_fail) {
        debug_sc_fail_addr := s3_req.addr
        debug_sc_fail_cnt  := 1.U
        // NOTE(review): s3_sc_fail is a Bool, so `s3_sc_fail === 100.U` is
        // almost certainly not the intended condition — this was probably
        // meant to be `debug_sc_fail_cnt === 100.U`; confirm and fix upstream
        XSWarn(s3_sc_fail === 100.U, p"L1DCache failed too many SCs in a row 0x${Hexadecimal(debug_sc_fail_addr)}, check if sth went wrong\n")
      }
    }
  }
  // assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")

  val banked_amo_wmask = UIntToOH(s3_req.word_idx)
//  val banked_wmask = s3_banked_store_wmask
  // data write mask priority: full line for a miss refill, store mask for a
  // store hit, a single word for an AMO write, otherwise nothing
  val banked_wmask = Mux(
    s3_req.miss,
    banked_full_wmask,
    Mux(
      s3_store_hit,
      s3_banked_store_wmask,
      Mux(
        s3_can_do_amo_write,
        banked_amo_wmask,
        banked_none_wmask
      )
    )
  )
  val update_data = s3_req.miss || s3_store_hit || s3_can_do_amo_write
  assert(!(banked_wmask.orR && !update_data))

  // generate write data
  // AMO hits
  // s3_s_amoalu: the AMO ALU result has been latched for the current s3 req
  val s3_s_amoalu = RegInit(false.B)
  val do_amoalu = amo_wait_amoalu && s3_valid && !s3_s_amoalu
  val amoalu   = Module(new AMOALU(wordBits))
  amoalu.io.mask := s3_req.amo_mask
  amoalu.io.cmd  := s3_req.cmd
  amoalu.io.lhs  := s3_data_word
  amoalu.io.rhs  := s3_req.amo_data

  // merge amo write data
//  val amo_bitmask = FillInterleaved(8, s3_req.amo_mask)
  val s3_amo_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  val s3_sc_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
  for (i <- 0 until DCacheBanks) {
    val old_data = s3_store_data_merged(i)
    val new_data = amoalu.io.out
    val wmask = Mux(
      s3_req.word_idx === i.U,
      ~0.U(wordBytes.W),
      0.U(wordBytes.W)
    )
    s3_amo_data_merged(i) := mergePutData(old_data, new_data, wmask)
//    s3_sc_data_merged(i) := amo_bitmask & s3_req.amo_data | ~amo_bitmask & old_data
    s3_sc_data_merged(i) := mergePutData(old_data, s3_req.amo_data,
      Mux(s3_req.word_idx === i.U && !s3_sc_fail, s3_req.amo_mask, 0.U(wordBytes.W))
    )
  }
  val s3_amo_data_merged_reg = RegEnable(s3_amo_data_merged, do_amoalu)
  when(do_amoalu){
    s3_s_amoalu := true.B
  }

  val miss_wb = s3_req.miss && s3_need_replacement && s3_coh.state =/= ClientStates.Nothing
  val probe_wb = s3_req.probe
  val replace_wb = s3_req.replace
  val need_wb = miss_wb || probe_wb || replace_wb

  val (_, miss_shrink_param, _) = s3_coh.onCacheControl(M_FLUSH)
  val writeback_param = Mux(probe_wb, probe_shrink_param, miss_shrink_param)
  val writeback_data = if (dcacheParameters.alwaysReleaseData) {
    s3_tag_match && s3_req.probe && s3_req.probe_need_data ||
      s3_coh === ClientStates.Dirty || (miss_wb || replace_wb) && s3_coh.state =/= ClientStates.Nothing
  } else {
    s3_tag_match && s3_req.probe && s3_req.probe_need_data || s3_coh === ClientStates.Dirty
  }

  // per-request-type completion conditions: each gates on the resources
  // that request type still needs in s3
  val s3_probe_can_go = s3_req.probe && io.wb.ready && (io.meta_write.ready || !probe_update_meta)
  val s3_store_can_go = s3_req.isStore && !s3_req.probe && (io.meta_write.ready || !store_update_meta) && (io.data_write.ready || !update_data)
  val s3_amo_can_go = s3_amo_hit && (io.meta_write.ready || !amo_update_meta) && (io.data_write.ready || !update_data) && (s3_s_amoalu || !amo_wait_amoalu)
  val s3_miss_can_go = s3_req.miss &&
    (io.meta_write.ready || !amo_update_meta) &&
    (io.data_write.ready || !update_data) &&
    (s3_s_amoalu || !amo_wait_amoalu) &&
    io.tag_write.ready &&
    io.wb.ready
  val s3_replace_nothing = s3_req.replace && s3_coh.state === ClientStates.Nothing
  val s3_replace_can_go = s3_req.replace && (s3_replace_nothing || io.wb.ready)
  val s3_can_go = s3_probe_can_go || s3_store_can_go || s3_amo_can_go || s3_miss_can_go || s3_replace_can_go
  val s3_update_data_cango = s3_store_can_go || s3_amo_can_go || s3_miss_can_go // used to speed up data_write gen
  val s3_fire = s3_valid && s3_can_go
  when (s2_fire_to_s3) {
    s3_valid := true.B
  }.elsewhen (s3_fire) {
    s3_valid := false.B
  }
  s3_ready := !s3_valid || s3_can_go
  s3_s0_set_conflict := s3_valid && s3_idx === s0_idx
  s3_s0_set_conflict_store := s3_valid && s3_idx === store_idx
  assert(RegNext(!s3_valid || !(s3_req.isStore && !s3_req.probe) || s3_hit)) // miss store should never come to s3

  when(s3_fire) {
    s3_s_amoalu := false.B
  }

  req.ready := s0_can_go

  io.meta_read.valid := req.valid && s1_ready && !set_conflict
  io.meta_read.bits.idx := get_idx(s0_req.vaddr)
  io.meta_read.bits.way_en := Mux(s0_req.replace, s0_req.replace_way_en, ~0.U(nWays.W))

  io.tag_read.valid := req.valid && s1_ready && !set_conflict && !s0_req.replace
  io.tag_read.bits.idx := get_idx(s0_req.vaddr)
  io.tag_read.bits.way_en := ~0.U(nWays.W)

  io.data_read_intend := s1_valid && s1_need_data
  io.data_read.valid := s1_valid && s1_need_data && s2_ready
  io.data_read.bits.rmask := s1_banked_rmask
  io.data_read.bits.way_en := s1_way_en
  io.data_read.bits.addr := s1_req.vaddr

  io.miss_req.valid := s2_valid && s2_can_go_to_mq
  val miss_req = io.miss_req.bits
  miss_req := DontCare
  miss_req.source := s2_req.source
  miss_req.cmd := s2_req.cmd
  miss_req.addr := s2_req.addr
  miss_req.vaddr := s2_req.vaddr
  miss_req.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_repl_way_en)
  miss_req.store_data := s2_req.store_data
  miss_req.store_mask := s2_req.store_mask
  miss_req.word_idx := s2_req.word_idx
  miss_req.amo_data := s2_req.amo_data
  miss_req.amo_mask := s2_req.amo_mask
  miss_req.req_coh := s2_hit_coh
  miss_req.replace_coh := s2_repl_coh
  miss_req.replace_tag := s2_repl_tag
  miss_req.id := s2_req.id
  miss_req.cancel := false.B

  // replay: the miss queue could not accept the request this cycle
  io.store_replay_resp.valid := s2_valid && s2_can_go_to_mq && replay && s2_req.isStore
  io.store_replay_resp.bits.data := DontCare
  io.store_replay_resp.bits.miss := true.B
  io.store_replay_resp.bits.replay := true.B
  io.store_replay_resp.bits.id := s2_req.id

  io.store_hit_resp.valid := s3_valid && s3_store_can_go
  io.store_hit_resp.bits.data := DontCare
  io.store_hit_resp.bits.miss := false.B
  io.store_hit_resp.bits.replay := false.B
  io.store_hit_resp.bits.id := s3_req.id

  io.release_update.valid := s3_valid && (s3_store_can_go || s3_amo_can_go) && s3_hit && update_data
  io.release_update.bits.addr := s3_req.addr
  io.release_update.bits.mask := Mux(s3_store_hit, s3_banked_store_wmask, banked_amo_wmask)
  io.release_update.bits.data := Mux(
    amo_wait_amoalu,
    s3_amo_data_merged_reg,
    Mux(
      s3_sc,
      s3_sc_data_merged,
      s3_store_data_merged
    )
  ).asUInt

  val atomic_hit_resp = Wire(new AtomicsResp)
  atomic_hit_resp.data := Mux(s3_sc, s3_sc_resp, s3_data_word)
  atomic_hit_resp.miss := false.B
  atomic_hit_resp.miss_id := s3_req.miss_id
  atomic_hit_resp.error := s3_error
  atomic_hit_resp.replay := false.B
  atomic_hit_resp.ack_miss_queue := s3_req.miss
  // NOTE(review): `id` is driven with lrsc_valid (a Bool); looks deliberate
  // (reusing the id field to report reservation status) but worth confirming
  atomic_hit_resp.id := lrsc_valid
  val atomic_replay_resp = Wire(new AtomicsResp)
  atomic_replay_resp.data := DontCare
  atomic_replay_resp.miss := true.B
  atomic_replay_resp.miss_id := DontCare
  atomic_replay_resp.error := false.B
  atomic_replay_resp.replay := true.B
  atomic_replay_resp.ack_miss_queue := false.B
  atomic_replay_resp.id := DontCare
  val atomic_replay_resp_valid = s2_valid && s2_can_go_to_mq && replay && s2_req.isAMO
  val atomic_hit_resp_valid = s3_valid && (s3_amo_can_go || s3_miss_can_go && s3_req.isAMO)
  io.atomic_resp.valid := atomic_replay_resp_valid || atomic_hit_resp_valid
  io.atomic_resp.bits := Mux(atomic_replay_resp_valid, atomic_replay_resp, atomic_hit_resp)

  io.replace_resp.valid := s3_fire && s3_req.replace
  io.replace_resp.bits := s3_req.miss_id

  io.meta_write.valid := s3_fire && update_meta
  io.meta_write.bits.idx := s3_idx
  io.meta_write.bits.way_en := s3_way_en
  io.meta_write.bits.meta.coh := new_coh

  io.error_flag_write.valid := s3_fire && update_meta && s3_l2_error
  io.error_flag_write.bits.idx := s3_idx
  io.error_flag_write.bits.way_en := s3_way_en
  io.error_flag_write.bits.error := s3_l2_error

  io.tag_write.valid := s3_fire && s3_req.miss
  io.tag_write.bits.idx := s3_idx
  io.tag_write.bits.way_en := s3_way_en
  io.tag_write.bits.tag := get_tag(s3_req.addr)

  io.tag_write_intend := s3_req.miss && s3_valid
  XSPerfAccumulate("fake_tag_write_intend", io.tag_write_intend && !io.tag_write.valid)
  XSPerfAccumulate("mainpipe_tag_write", io.tag_write.valid)

  assert(!RegNext(io.tag_write.valid && !io.tag_write_intend))

  io.data_write.valid := s3_valid && s3_update_data_cango && update_data
  io.data_write.bits.way_en := s3_way_en
  io.data_write.bits.addr := s3_req.vaddr
  io.data_write.bits.wmask := banked_wmask
  io.data_write.bits.data := Mux(
    amo_wait_amoalu,
    s3_amo_data_merged_reg,
    Mux(
      s3_sc,
      s3_sc_data_merged,
      s3_store_data_merged
    )
  )
  assert(RegNext(!io.meta_write.valid || !s3_req.replace))
  assert(RegNext(!io.tag_write.valid || !s3_req.replace))
  assert(RegNext(!io.data_write.valid || !s3_req.replace))

  // same conditions as the s3_*_can_go signals above, with io.wb.ready
  // removed from each term to avoid a valid-depends-on-ready loop
  io.wb.valid := s3_valid && (
    // replace
    s3_req.replace && !s3_replace_nothing ||
    // probe can go to wbq
    s3_req.probe && (io.meta_write.ready || !probe_update_meta) ||
      // amo miss can go to wbq
      s3_req.miss &&
        (io.meta_write.ready || !amo_update_meta) &&
        (io.data_write.ready || !update_data) &&
        (s3_s_amoalu || !amo_wait_amoalu) &&
        io.tag_write.ready
    ) && need_wb
  io.wb.bits.addr := get_block_addr(Cat(s3_tag, get_untag(s3_req.vaddr)))
  io.wb.bits.param := writeback_param
  io.wb.bits.voluntary := s3_req.miss || s3_req.replace
  io.wb.bits.hasData := writeback_data
  io.wb.bits.dirty := s3_coh === ClientStates.Dirty
  io.wb.bits.data := s3_data.asUInt()
  io.wb.bits.delay_release := s3_req.replace
  io.wb.bits.miss_id := s3_req.miss_id

  io.replace_access.valid := RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe)
  io.replace_access.bits.set := s2_idx
  io.replace_access.bits.way := RegNext(OHToUInt(s1_way_en))

  io.replace_way.set.valid := RegNext(s0_fire)
  io.replace_way.set.bits := s1_idx

  // TODO: consider block policy of a finer granularity
  io.status.s0_set.valid := req.valid
  io.status.s0_set.bits := get_idx(s0_req.vaddr)
  io.status.s1.valid := s1_valid
  io.status.s1.bits.set := s1_idx
  io.status.s1.bits.way_en := s1_way_en
  io.status.s2.valid := s2_valid && !s2_req.replace
  io.status.s2.bits.set := s2_idx
  io.status.s2.bits.way_en := s2_way_en
  io.status.s3.valid := s3_valid && !s3_req.replace
  io.status.s3.bits.set := s3_idx
  io.status.s3.bits.way_en := s3_way_en

  // report error to beu and csr, 1 cycle after read data resp
  io.error := 0.U.asTypeOf(new L1CacheErrorInfo())
  // report error, update error csr
  io.error.valid := s3_error && RegNext(s2_fire)
  // only tag_error and data_error will be reported to beu
  // l2_error should not be reported (l2 will report that)
  io.error.report_to_beu := (RegEnable(s2_tag_error, s2_fire) || s3_data_error) && RegNext(s2_fire)
  io.error.paddr := RegEnable(s2_req.addr, s2_fire)
  io.error.source.tag := RegEnable(s2_tag_error, s2_fire)
  io.error.source.data := s3_data_error
  io.error.source.l2 := RegEnable(s2_flag_error || s2_l2_error, s2_fire)
  io.error.opType.store := RegEnable(s2_req.isStore && !s2_req.probe, s2_fire)
  io.error.opType.probe := RegEnable(s2_req.probe, s2_fire)
  io.error.opType.release := RegEnable(s2_req.replace, s2_fire)
  io.error.opType.atom := RegEnable(s2_req.isAMO && !s2_req.probe, s2_fire)

  val perfEvents = Seq(
    ("dcache_mp_req          ", s0_fire                                                      ),
    ("dcache_mp_total_penalty", PopCount(VecInit(Seq(s0_fire, s1_valid, s2_valid, s3_valid))))
  )
  generatePerfEvent()
}
784